Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 13:50:52 +07:00)
slab: introduce krealloc
This introduces krealloc(), which reallocates memory while keeping the contents unchanged. The allocator avoids reallocation if the new size fits the currently used cache. I also added a simple non-optimized version for mm/slob.c for compatibility.

[akpm@linux-foundation.org: fix warnings]
Acked-by: Josef Sipek <jsipek@fsl.cs.sunysb.edu>
Acked-by: Matt Mackall <mpm@selenic.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e3ebadd95c
commit fd76bab2fa
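For context, here is a minimal caller-side sketch (not part of the commit) of how the new krealloc() could be used to grow a buffer; the helper name resize_buf() is hypothetical. Note that in the version added here, a failed krealloc() returns NULL without freeing the original object, so the caller still holds a valid pointer on the error path.

/*
 * Illustrative sketch only -- not from this commit. Grows a buffer in
 * place of the usual kmalloc/memcpy/kfree sequence; resize_buf() is a
 * hypothetical helper name.
 */
#include <linux/errno.h>
#include <linux/slab.h>

static int resize_buf(char **buf, size_t new_size)
{
        char *tmp;

        tmp = krealloc(*buf, new_size, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM; /* *buf is left untouched and still valid */

        *buf = tmp;
        return 0;
}

With the mm/slab.c implementation below, such a call is also cheap when the new size still maps to the object's current kmalloc cache: krealloc() simply returns the original pointer without copying.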
@@ -72,8 +72,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  */
 void *__kmalloc(size_t, gfp_t);
 void *__kzalloc(size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
-unsigned int ksize(const void *);
+size_t ksize(const void *);
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
mm/slab.c | 49
@@ -3739,6 +3739,53 @@ void *__kmalloc(size_t size, gfp_t flags)
 EXPORT_SYMBOL(__kmalloc);
 #endif
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+        struct kmem_cache *cache, *new_cache;
+        void *ret;
+
+        if (unlikely(!p))
+                return kmalloc_track_caller(new_size, flags);
+
+        if (unlikely(!new_size)) {
+                kfree(p);
+                return NULL;
+        }
+
+        cache = virt_to_cache(p);
+        new_cache = __find_general_cachep(new_size, flags);
+
+        /*
+         * If new size fits in the current cache, bail out.
+         */
+        if (likely(cache == new_cache))
+                return (void *)p;
+
+        /*
+         * We are on the slow-path here so do not use __cache_alloc
+         * because it bloats kernel text.
+         */
+        ret = kmalloc_track_caller(new_size, flags);
+        if (ret) {
+                memcpy(ret, p, min(new_size, ksize(p)));
+                kfree(p);
+        }
+        return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
@@ -4481,7 +4528,7 @@ const struct seq_operations slabstats_op = {
  * allocated with either kmalloc() or kmem_cache_alloc(). The object
  * must not be freed during the duration of the call.
  */
-unsigned int ksize(const void *objp)
+size_t ksize(const void *objp)
 {
         if (unlikely(objp == NULL))
                 return 0;
mm/slob.c | 35
@@ -190,6 +190,39 @@ void *__kmalloc(size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+        void *ret;
+
+        if (unlikely(!p))
+                return kmalloc_track_caller(new_size, flags);
+
+        if (unlikely(!new_size)) {
+                kfree(p);
+                return NULL;
+        }
+
+        ret = kmalloc_track_caller(new_size, flags);
+        if (ret) {
+                memcpy(ret, p, min(new_size, ksize(p)));
+                kfree(p);
+        }
+        return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 void kfree(const void *block)
 {
         bigblock_t *bb, **last = &bigblocks;
@@ -219,7 +252,7 @@ void kfree(const void *block)
 
 EXPORT_SYMBOL(kfree);
 
-unsigned int ksize(const void *block)
+size_t ksize(const void *block)
 {
         bigblock_t *bb;
         unsigned long flags;