mm: kmem: prepare remote memcg charging infra for interrupt contexts
The remote memcg charging API uses current->active_memcg to store the currently active memory cgroup, which overwrites the memory cgroup of the current process. It works well for normal contexts, but doesn't work for interrupt contexts: if an interrupt occurs during the execution of a section with an active memcg set, all allocations inside the interrupt will be charged to that memcg (given that we'll enable accounting for allocations from an interrupt context). But because the interrupt might have no relation to the active memcg set outside, this is obviously wrong from the accounting perspective.

To resolve this problem, let's add a global percpu int_active_memcg variable, which stores the active memory cgroup to be used from interrupt contexts. set_active_memcg() will transparently use current->active_memcg or int_active_memcg depending on the context.

To make the read side simple and transparent for the caller, let's introduce two new functions:

  - struct mem_cgroup *active_memcg(void),
  - struct mem_cgroup *get_active_memcg(void).

They return the active memcg if one is set, hiding the implementation detail of where to look for it in the current context.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Link: http://lkml.kernel.org/r/20200827225843.1270629-4-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 67f0286498
commit 37d5985c00
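For orientation, a minimal sketch of the scoped remote-charging pattern this patch hardens; alloc_charged_to() is a hypothetical helper, not part of the patch. __GFP_ACCOUNT directs the allocation to the active memcg, and after this change the same pattern also becomes safe to use from interrupt context:

#include <linux/sched/mm.h>	/* set_active_memcg() */
#include <linux/slab.h>		/* kmalloc() */

/* Illustrative helper (not from this patch): charge an allocation
 * to a remote memcg via the scoped API.
 */
static void *alloc_charged_to(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old;
	void *p;

	old = set_active_memcg(memcg);		/* open the charging scope */
	p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
	set_active_memcg(old);			/* restore the previous scope */

	return p;
}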
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
@@ -279,6 +279,7 @@ static inline void memalloc_nocma_restore(unsigned int flags)
 #endif
 
 #ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
 /**
  * set_active_memcg - Starts the remote memcg charging scope.
  * @memcg: memcg to charge.
@@ -293,8 +294,16 @@ static inline void memalloc_nocma_restore(unsigned int flags)
 static inline struct mem_cgroup *
 set_active_memcg(struct mem_cgroup *memcg)
 {
-	struct mem_cgroup *old = current->active_memcg;
-	current->active_memcg = memcg;
+	struct mem_cgroup *old;
+
+	if (in_interrupt()) {
+		old = this_cpu_read(int_active_memcg);
+		this_cpu_write(int_active_memcg, memcg);
+	} else {
+		old = current->active_memcg;
+		current->active_memcg = memcg;
+	}
+
 	return old;
 }
 #else
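To make the effect of the in_interrupt() branch concrete, a hedged sketch (the handler name and the dev_id wiring are hypothetical, not from this patch): an interrupt that opens its own charging scope now saves and restores the percpu int_active_memcg slot, leaving the interrupted task's current->active_memcg untouched.

#include <linux/interrupt.h>

/* Hypothetical IRQ handler, for illustration only. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct mem_cgroup *memcg = dev_id;	/* assumed: memcg passed as dev_id */
	struct mem_cgroup *old;

	old = set_active_memcg(memcg);		/* writes int_active_memcg */
	/* GFP_ATOMIC | __GFP_ACCOUNT allocations here are charged to memcg. */
	set_active_memcg(old);			/* task's scope stays intact */

	return IRQ_HANDLED;
}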
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
@@ -73,6 +73,9 @@ EXPORT_SYMBOL(memory_cgrp_subsys);
 
 struct mem_cgroup *root_mem_cgroup __read_mostly;
 
+/* Active memory cgroup to use from an interrupt context */
+DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
+
 /* Socket memory accounting disabled? */
 static bool cgroup_memory_nosocket;
 
@@ -1061,26 +1064,43 @@ struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
 }
 EXPORT_SYMBOL(get_mem_cgroup_from_page);
 
+static __always_inline struct mem_cgroup *active_memcg(void)
+{
+	if (in_interrupt())
+		return this_cpu_read(int_active_memcg);
+	else
+		return current->active_memcg;
+}
+
+static __always_inline struct mem_cgroup *get_active_memcg(void)
+{
+	struct mem_cgroup *memcg;
+
+	rcu_read_lock();
+	memcg = active_memcg();
+	if (memcg) {
+		/* current->active_memcg must hold a ref. */
+		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
+			memcg = root_mem_cgroup;
+		else
+			memcg = current->active_memcg;
+	}
+	rcu_read_unlock();
+
+	return memcg;
+}
+
 /**
- * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
+ * If active memcg is set, do not fallback to current->mm->memcg.
  */
 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
 {
 	if (memcg_kmem_bypass())
 		return NULL;
 
-	if (unlikely(current->active_memcg)) {
-		struct mem_cgroup *memcg;
-
-		rcu_read_lock();
-		/* current->active_memcg must hold a ref. */
-		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
-			memcg = root_mem_cgroup;
-		else
-			memcg = current->active_memcg;
-		rcu_read_unlock();
-		return memcg;
-	}
+	if (unlikely(active_memcg()))
+		return get_active_memcg();
+
 	return get_mem_cgroup_from_mm(current->mm);
 }
 
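As a hedged illustration of the reference-counting contract above (the function name is hypothetical, not in the tree): get_active_memcg() hands back the active memcg with its css refcount elevated via css_tryget(), so a non-NULL result must be balanced with css_put().

/* Hypothetical consumer, for illustration only. */
static void charge_to_active_memcg_example(void)
{
	struct mem_cgroup *memcg = get_active_memcg();

	if (memcg) {
		/* ... charge against memcg here ... */
		css_put(&memcg->css);	/* pairs with css_tryget() inside */
	}
}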
@@ -2940,8 +2960,8 @@ __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
 		return NULL;
 
 	rcu_read_lock();
-	if (unlikely(current->active_memcg))
-		memcg = rcu_dereference(current->active_memcg);
+	if (unlikely(active_memcg()))
+		memcg = active_memcg();
 	else
 		memcg = mem_cgroup_from_task(current);
 