mm: slab: fix potential double free in ___cache_free

With commit 10befea91b ("mm: memcg/slab: use a single set of
kmem_caches for all allocations"), it becomes possible to call kfree()
from slabs_destroy().

The functions cache_flusharray() and do_drain() call slabs_destroy() on
the array_cache of the local CPU without first updating the size of the
array_cache.  This allows the kfree() call from slabs_destroy() to
recursively call cache_flusharray(), which can then call free_block()
on the same elements of the local CPU's array_cache, causing a double
free and memory corruption.

To fix the issue, simply update the local CPU's array_cache before
calling slabs_destroy().
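
The ordering hazard and the fix can be illustrated with a small,
self-contained user-space sketch.  All names here (flush_buggy(),
flush_fixed(), ac_sim, destroy_sim(), ...) are invented for
illustration; this is not the kernel code, it only models the ordering:
the buggy variant invokes the destroy hook while ac->avail still counts
the entries that were just freed, so a re-entrant flush frees them
again, whereas the fixed variant updates ac->avail first.

/*
 * User-space sketch of the re-entrancy hazard (not kernel code).
 * flush_buggy() mirrors the old ordering: the destroy hook runs while
 * ac->avail still claims the just-freed entries, so a re-entrant flush
 * frees them a second time.  flush_fixed() updates ac->avail first.
 */
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 4

struct ac_sim {
	int avail;
	int entry[CACHE_SIZE];		/* stand-ins for cached object pointers */
};

static int free_count[CACHE_SIZE];	/* times each entry was "freed" */
static int depth;			/* limits the demo to one re-entry */

typedef void (*flush_fn)(struct ac_sim *);

static void free_block_sim(struct ac_sim *ac)
{
	for (int i = 0; i < ac->avail; i++)
		free_count[ac->entry[i]]++;
}

/* Models slabs_destroy(): may recurse into the flush path via kfree(). */
static void destroy_sim(struct ac_sim *ac, flush_fn flush)
{
	if (depth++ == 0)
		flush(ac);		/* re-entrant call */
}

static void flush_buggy(struct ac_sim *ac)
{
	free_block_sim(ac);
	destroy_sim(ac, flush_buggy);	/* BUG: ac->avail is still stale here */
	ac->avail = 0;
}

static void flush_fixed(struct ac_sim *ac)
{
	free_block_sim(ac);
	ac->avail = 0;			/* update the cache first ... */
	destroy_sim(ac, flush_fixed);	/* ... so re-entry finds nothing to free */
}

static void run(const char *name, flush_fn flush)
{
	struct ac_sim ac = { .avail = CACHE_SIZE, .entry = { 0, 1, 2, 3 } };

	memset(free_count, 0, sizeof(free_count));
	depth = 0;
	flush(&ac);
	printf("%s: entry 0 freed %d time(s)\n", name, free_count[0]);
}

int main(void)
{
	run("buggy", flush_buggy);	/* double free: each entry freed twice */
	run("fixed", flush_fixed);	/* each entry freed exactly once */
	return 0;
}

Built with any C99 compiler, the buggy run should report each entry
freed twice and the fixed run only once, matching the reordering in the
diff below.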

Fixes: 10befea91b ("mm: memcg/slab: use a single set of kmem_caches for all allocations")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reported-by: kernel test robot <rong.a.chen@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ted Ts'o <tytso@mit.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

@@ -1632,6 +1632,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
 		kmem_cache_free(cachep->freelist_cache, freelist);
 }
 
+/*
+ * Update the size of the caches before calling slabs_destroy as it may
+ * recursively call kfree.
+ */
 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
@@ -2153,8 +2157,8 @@ static void do_drain(void *arg)
 	spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
 	spin_unlock(&n->list_lock);
-	slabs_destroy(cachep, &list);
 	ac->avail = 0;
+	slabs_destroy(cachep, &list);
 }
 
 static void drain_cpu_caches(struct kmem_cache *cachep)
@@ -3402,9 +3406,9 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 	}
 #endif
 	spin_unlock(&n->list_lock);
-	slabs_destroy(cachep, &list);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
+	slabs_destroy(cachep, &list);
 }
 
 /*