Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-25 15:40:53 +07:00)
Commit f7ce3190c4
Currently, kmem_cache stores a pointer to struct memcg_cache_params instead of embedding it. The rationale is to save memory when kmem accounting is disabled. However, the memcg_cache_params has shrivelled drastically since it was first introduced:

* Initially:

struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

* Now:

struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

So the memory saving does not seem to be a clear win anymore.

OTOH, keeping a pointer to memcg_cache_params struct instead of embedding it results in touching one more cache line on kmem alloc/free hot paths. Besides, it makes linking kmem caches in a list chained by a field of struct memcg_cache_params really painful due to a level of indirection, while I want to make them linked in the following patch. That said, let us embed it.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
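The list-chaining point is easier to see in code. The following is a minimal userspace sketch, not kernel source: the struct names kmem_cache_ptr and kmem_cache_embedded are hypothetical stand-ins pared down to a couple of fields, and container_of() is open-coded. With the params embedded, a pointer to memcg_params converts back to the owning cache by pointer arithmetic alone; with the old pointer layout, the same walk would need the separately allocated params object to carry a back-pointer and would cost one extra dereference per step.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

/* Pared-down stand-in for struct memcg_cache_params. */
struct memcg_cache_params {
	struct list_head list;	/* would chain memcg caches together */
};

/* Old layout: params live in a separate allocation. */
struct kmem_cache_ptr {
	unsigned int size;
	struct memcg_cache_params *memcg_params;
};

/* New layout: params are embedded in the cache itself. */
struct kmem_cache_embedded {
	unsigned int size;
	struct memcg_cache_params memcg_params;
};

int main(void)
{
	struct kmem_cache_embedded cache = { .size = 64 };
	struct memcg_cache_params *p = &cache.memcg_params;

	/* Walking a list chained through memcg_params, the owning cache is
	 * recovered without any back-pointer or extra dereference. */
	struct kmem_cache_embedded *owner =
		container_of(p, struct kmem_cache_embedded, memcg_params);

	printf("owner->size = %u\n", owner->size);	/* prints 64 */
	return 0;
}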
80 lines
1.9 KiB
C
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params memcg_params;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#endif	/* _LINUX_SLAB_DEF_H */
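A side note on the hot path: reciprocal_buffer_size is there so SLAB can map an object's byte offset within a slab to its object index with a multiply and a couple of shifts instead of an integer division on every free. The sketch below is a standalone userspace restatement of the multiply-and-shift scheme that linux/reciprocal_div.h is built on, not a copy of the kernel source; the 88-byte buffer_size, fls32() helper, and the verification loop in main() are illustrative assumptions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct reciprocal_value {
	uint32_t m;
	uint8_t sh1, sh2;
};

/* Highest set bit position (1-based), 0 for x == 0; stand-in for fls(). */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Precompute multiplier and shifts for dividing by d.  SLAB would do this
 * once at cache-creation time for the object buffer size. */
static struct reciprocal_value reciprocal_value(uint32_t d)
{
	struct reciprocal_value R;
	int l = fls32(d - 1);
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	R.m = (uint32_t)m;
	R.sh1 = l > 1 ? 1 : l;		/* min(l, 1) */
	R.sh2 = l > 0 ? l - 1 : 0;	/* max(l - 1, 0) */
	return R;
}

/* Hot-path "division": one multiply and two shifts, no divide instruction. */
static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	/* Pretend each object in the slab occupies 88 bytes. */
	uint32_t buffer_size = 88;
	struct reciprocal_value r = reciprocal_value(buffer_size);
	uint32_t offset;

	/* obj_to_index-style check: byte offset within slab -> object index. */
	for (offset = 0; offset < (1U << 20); offset++)
		assert(reciprocal_divide(offset, r) == offset / buffer_size);

	printf("reciprocal division matches plain division for all tested offsets\n");
	return 0;
}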