Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-24 01:53:15 +07:00)
Commit 55834c5909
Quarantine isolates freed objects in a separate queue. The objects are returned to the allocator later, which helps to detect use-after-free errors.

When an object is freed, its state changes from KASAN_STATE_ALLOC to KASAN_STATE_QUARANTINE. The object is poisoned and put into quarantine instead of being returned to the allocator, so every subsequent access to that object triggers a KASAN error, and the error handler is able to say where the object has been allocated and deallocated. When it is time for the object to leave quarantine, its state becomes KASAN_STATE_FREE and it is returned to the allocator. From then on the allocator may reuse it for another allocation. Before that happens, it is still possible to detect a use-after-free on that object (it retains its allocation/deallocation stacks). Once the allocator reuses the object, its shadow is unpoisoned and the old allocation/deallocation stacks are wiped, so even an incorrect use of the object will no longer trigger an ASan warning. Without the quarantine there is no guarantee that freed objects are not reused immediately, which is why the probability of catching a use-after-free is lower than with the quarantine in place.

Freed objects are first added to per-cpu quarantine queues. When a cache is destroyed or memory shrinking is requested, the objects are moved into the global quarantine queue. Whenever a kmalloc call allows memory reclaiming, the oldest objects are popped out of the global queue until the total size of objects in quarantine drops below 3/4 of the maximum quarantine size (which is a fraction of installed physical memory). As long as an object remains in quarantine, KASAN is able to report accesses to it, so the chance of reporting a use-after-free is increased. Once the object leaves quarantine, the allocator may reuse it, in which case the object is unpoisoned and KASAN can no longer detect incorrect accesses to it.

Right now quarantine support is only enabled for the SLAB allocator. Unification of KASAN features in SLAB and SLUB will be done later.

This patch is based on the "mm: kasan: quarantine" patch originally prepared by Dmitry Chernenkov. A number of improvements have been suggested by Andrey Ryabinin.

[glider@google.com: v9]
Link: http://lkml.kernel.org/r/1462987130-144092-1-git-send-email-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
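The quarantine logic described above lives under mm/kasan/, not in the file shown below. The sketch that follows only illustrates the lifecycle the commit message describes; every identifier in it is a placeholder, not the real implementation.

/* Illustrative sketch of the quarantine lifecycle described above.
 * All identifiers are placeholders, not the code in mm/kasan/. */

struct quarantine_sketch {
	void **fifo;		/* oldest-first queue of freed objects */
	size_t bytes;		/* total size of quarantined objects */
	size_t max_bytes;	/* a fraction of installed physical memory */
};

/* On free: poison the object and queue it instead of returning it to
 * the allocator, so any later access triggers a KASAN report. */
static void sketch_quarantine_put(struct quarantine_sketch *q, void *obj, size_t size)
{
	poison_object(obj, size);		/* placeholder */
	fifo_push(q->fifo, obj);		/* placeholder */
	q->bytes += size;
}

/* On a kmalloc() that may reclaim: pop the oldest objects until the
 * queue is below 3/4 of its limit and hand them back to the allocator. */
static void sketch_quarantine_reduce(struct quarantine_sketch *q)
{
	while (q->bytes > q->max_bytes / 4 * 3) {
		void *obj = fifo_pop(q->fifo);	/* placeholder */

		q->bytes -= object_size(obj);	/* placeholder */
		unpoison_and_free(obj);		/* the allocator may reuse it again */
	}
}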
1287 lines · 30 KiB · C
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_NOTRACK | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
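
/*
 * The two generic helpers above are the fallback behind the
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() API.  A minimal caller
 * (illustrative sketch only; foo_cache is a hypothetical cache) could be:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		... use the objects ...
 *		kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 *	}
 *
 * As the fallback shows, a failed bulk allocation frees whatever it had
 * already allocated and returns 0, so the array is either fully populated
 * or not usable at all.
 */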

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
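
/*
 * Worked example for calculate_alignment() (illustrative; assumes a
 * 64-byte cache line and sizeof(void *) == 8): for a 24-byte object with
 * SLAB_HWCACHE_ALIGN and align == 0, ralign starts at 64 and is halved
 * once (24 <= 32), so the object ends up 32-byte aligned.  A 100-byte
 * object keeps the full 64-byte alignment because 100 > 64 / 2.
 */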

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/*
	 * Some allocators will constraint the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
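
/*
 * Typical usage of kmem_cache_create() (illustrative sketch only;
 * struct foo and foo_cache are hypothetical):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 *
 * With slab merging enabled (the default), the call may simply return an
 * alias of an existing compatible cache via __kmem_cache_alias() above.
 */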

static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       css->id, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags, root_cache->ctor,
			 memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.list, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	kasan_cache_destroy(s);
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
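
/*
 * Worked example of the lookup above (illustrative; assumes
 * KMALLOC_MIN_SIZE == 8, i.e. an unpatched size_index[] table):
 * kmalloc(20) -> size_index_elem(20) == 2 -> size_index[2] == 5 ->
 * kmalloc_caches[5] ("kmalloc-32"); kmalloc(100) -> size_index[12] == 7
 * -> "kmalloc-128"; kmalloc(300) is above 192, so fls(299) == 9 selects
 * "kmalloc-512".
 */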

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL, 0},			{"kmalloc-96", 96},
	{"kmalloc-192", 192},		{"kmalloc-8", 8},
	{"kmalloc-16", 16},		{"kmalloc-32", 32},
	{"kmalloc-64", 64},		{"kmalloc-128", 128},
	{"kmalloc-256", 256},		{"kmalloc-512", 512},
	{"kmalloc-1024", 1024},		{"kmalloc-2048", 2048},
	{"kmalloc-4096", 4096},		{"kmalloc-8192", 8192},
	{"kmalloc-16384", 16384},	{"kmalloc-32768", 32768},
	{"kmalloc-65536", 65536},	{"kmalloc-131072", 131072},
	{"kmalloc-262144", 262144},	{"kmalloc-524288", 524288},
	{"kmalloc-1048576", 1048576},	{"kmalloc-2097152", 2097152},
	{"kmalloc-4194304", 4194304},	{"kmalloc-8388608", 8388608},
	{"kmalloc-16777216", 16777216},	{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
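
/*
 * Illustrative usage sketch for krealloc() (buf and new_len are
 * hypothetical):
 *
 *	char *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 *
 * On failure krealloc() returns NULL and leaves the old buffer intact, so
 * buf must still be freed by its owner.  As __do_krealloc() above shows,
 * the old buffer is reused in place when ksize(buf) already covers
 * new_len, so the returned pointer may equal the old one.
 */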

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);