commit bf8d5d52ff
The memory controller implements the memory.low best-effort memory protection mechanism, which works perfectly in many cases and allows protecting the working sets of important workloads from sudden reclaim.

But its semantics have a significant limitation: it works only as long as there is a supply of reclaimable memory. This makes it pretty useless against any sort of slow memory leak or memory usage increase. This is especially true for swapless systems. If swap is enabled, memory soft protection effectively postpones the problem, allowing a leaking application to fill the entire swap area, which makes no sense. The only effective way to guarantee memory protection in this case is to invoke the OOM killer.

It's possible to handle this case in userspace by reacting to MEMCG_LOW events, but there is still a place for a fail-safe in-kernel mechanism to provide stronger guarantees.

This patch introduces the memory.min interface for the cgroup v2 memory controller. It works very similarly to memory.low (sharing the same hierarchical behavior), except that it's not disabled if there is no more reclaimable memory in the system.

If a cgroup is not populated, its memory.min is ignored, because otherwise even the OOM killer wouldn't be able to reclaim the protected memory, and the system could stall.

[guro@fb.com: s/low/min/ in docs]
Link: http://lkml.kernel.org/r/20180510130758.GA9129@castle.DHCP.thefacebook.com
Link: http://lkml.kernel.org/r/20180509180734.GA4856@castle.DHCP.thefacebook.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
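As a quick illustration of the new interface (not part of this patch): a cgroup v2 manager protects a group by writing a byte value into that group's memory.min file. Below is a minimal userspace sketch; the cgroup path is an assumption and depends on your hierarchy.

/*
 * Minimal sketch: set memory.min for a cgroup v2 group from userspace.
 * The cgroup path is hypothetical; the value is in bytes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical cgroup; adjust to your hierarchy */
        const char *path = "/sys/fs/cgroup/workload.slice/memory.min";
        const char *bytes = "536870912\n";      /* protect 512 MiB unconditionally */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, bytes, strlen(bytes)) < 0)
                perror("write");
        close(fd);
        return 0;
}

Unlike memory.low, the protection set this way keeps holding even when no reclaimable memory is left, so a group that stays under its memory.min is spared and the OOM killer targets unprotected groups instead.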
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_COUNTER_H
#define _LINUX_PAGE_COUNTER_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <asm/page.h>

struct page_counter {
        atomic_long_t usage;            /* current number of charged pages */
        unsigned long min;              /* hard protection (memory.min) */
        unsigned long low;              /* best-effort protection (memory.low) */
        unsigned long max;              /* hard limit on charged pages */
        struct page_counter *parent;    /* NULL for the root counter */

        /* effective memory.min and memory.min usage tracking */
        unsigned long emin;
        atomic_long_t min_usage;
        atomic_long_t children_min_usage;

        /* effective memory.low and memory.low usage tracking */
        unsigned long elow;
        atomic_long_t low_usage;
        atomic_long_t children_low_usage;

        /* legacy */
        unsigned long watermark;        /* historical maximum usage */
        unsigned long failcnt;          /* number of failed charge attempts */
};

#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
#else
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif

static inline void page_counter_init(struct page_counter *counter,
                                      struct page_counter *parent)
{
        atomic_long_set(&counter->usage, 0);
        counter->max = PAGE_COUNTER_MAX;
        counter->parent = parent;
}

static inline unsigned long page_counter_read(struct page_counter *counter)
{
        return atomic_long_read(&counter->usage);
}

void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
                             unsigned long nr_pages,
                             struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
                          unsigned long *nr_pages);

static inline void page_counter_reset_watermark(struct page_counter *counter)
{
        counter->watermark = page_counter_read(counter);
}

#endif /* _LINUX_PAGE_COUNTER_H */
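For context, here is a sketch of how kernel-side code might drive the API declared above. It is illustrative only: the static counters and the example_charge() helper are hypothetical, the snippet is not buildable outside a kernel tree, and the real consumer (the memcg code) sets up the parent/child relationship and protections itself.

/*
 * Illustrative sketch only: charging pages against a small page_counter
 * hierarchy.  Initialization is normally done once at controller setup,
 * not on every charge.
 */
#include <linux/errno.h>
#include <linux/page_counter.h>

static struct page_counter parent_counter, child_counter;

static int example_charge(unsigned long nr_pages)
{
        struct page_counter *fail;

        /* child points at parent: charges propagate up the hierarchy */
        page_counter_init(&parent_counter, NULL);
        page_counter_init(&child_counter, &parent_counter);

        /* protect the first 1024 pages of this group unconditionally */
        page_counter_set_min(&child_counter, 1024);

        /* try_charge fails if this counter or any ancestor would exceed max */
        if (!page_counter_try_charge(&child_counter, nr_pages, &fail))
                return -ENOMEM; /* 'fail' points at the counter that hit its limit */

        /* ... use the memory ... */

        page_counter_uncharge(&child_counter, nr_pages);
        return 0;
}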