86a294a81f ("mm, oom, compaction: prevent from should_compact_retry looping for ever for costly orders")
"mm: consider compaction feedback also for costly allocation" has
removed the upper bound for the reclaim/compaction retries based on the
number of reclaimed pages for costly orders. While this is desirable,
the patch missed an unfortunate interaction between reclaim, compaction
and the retry logic. The direct reclaim tries to get zones over the min
watermark while compaction backs off and returns COMPACT_SKIPPED when
all zones are below the low watermark plus a 1<<order gap. If we are
getting really close to OOM then __compaction_suitable can keep
returning COMPACT_SKIPPED for a high-order request (e.g. hugetlb
order-9) while the reclaim is not able to release enough pages to get
us over the low watermark. The reclaim is still able to make some
progress (usually thrashing over the few remaining pages) so we are not
able to break out of the loop.
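
For illustration, a minimal sketch of the back-off described above,
modeled on the kernel's __compaction_suitable (the helper names are
real; the body is simplified and the gap used by the real check in
mm/compaction.c may be larger than 1<<order):

/*
 * Sketch: compaction refuses to run (COMPACT_SKIPPED) unless the zone
 * is above the *low* watermark plus an order-based gap, while direct
 * reclaim only aims at the *min* watermark - hence the potential
 * livelock close to OOM.
 */
static enum compact_result __compaction_suitable(struct zone *zone, int order,
		unsigned int alloc_flags, int classzone_idx,
		unsigned long wmark_target)
{
	unsigned long watermark = low_wmark_pages(zone) + (1UL << order);

	/* order-0 watermark check against the given free pages target */
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
				 alloc_flags, wmark_target))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
}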
I have seen this happening with the same test described in "mm: consider
compaction feedback also for costly allocation" on a swapless system.
The original problem got resolved by "vmscan: consider classzone_idx in
compaction_ready" but it shows how things might go wrong when we
approach the OOM event horizon.
The reason why compaction requires being over low rather than min
watermark is not clear to me. This check has been there essentially
since 56de7263fc ("mm: compaction: direct compact when a high-order
allocation fails"). It is clearly an implementation detail though, and
we shouldn't pull it into the generic retry logic; we should rather be
able to cope with such an eventuality. The only place in
should_compact_retry where we retry without any upper bound is the
compaction_withdrawn() case.
Introduce a compaction_zonelist_suitable function which checks the
given zonelist and returns true only if there is at least one zone
which would unblock __compaction_suitable if more memory got reclaimed.
In this implementation it checks __compaction_suitable with
NR_FREE_PAGES plus part of the reclaimable memory as the target for the
watermark check. The reclaimable memory is reduced linearly by the
allocation order. The idea is that we do not want to reclaim all the
remaining memory for a single allocation request just to unblock
__compaction_suitable, which doesn't guarantee we will make further
progress.
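
A sketch of such a helper, following the description above (the
zonelist iteration and the zone_reclaimable_pages /
zone_page_state_snapshot helpers exist in the kernel; the exact body
and the classzone_idx plumbing, see the fixup notes below, may differ):

/*
 * Return true if at least one zone in the zonelist could pass
 * __compaction_suitable() if "enough" memory were reclaimed. Only part
 * of the reclaimable memory, shrinking linearly with the allocation
 * order, counts towards the watermark target, so a single request does
 * not justify reclaiming everything. Callers only use this for
 * order > 0 requests.
 */
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
					ac->high_zoneidx, ac->nodemask) {
		unsigned long available;

		/* a linearly order-reduced part of the reclaimable memory */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

		if (__compaction_suitable(zone, order, alloc_flags,
				ac_classzone_idx(ac), available)
							!= COMPACT_SKIPPED)
			return true;
	}

	return false;
}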
The new helper is then used if compaction_withdrawn() feedback was
provided, so we do not retry if there is no outlook for further
progress. !costly requests shouldn't be affected much - e.g. order-2
pages would require at least 64kB on the reclaimable LRUs while order-9
would need at least 32MB, which should be enough to not lock up.
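
In should_compact_retry the hook can look roughly like this sketch (the
surrounding feedback handling is elided; the parameter list is
illustrative):

/*
 * Sketch of the retry decision: when compaction merely backed off
 * (see compaction_withdrawn() below), keep retrying only as long as
 * the zonelist still offers an outlook for further progress.
 */
static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
		     enum compact_result compact_result)
{
	if (!order)
		return false;

	/* ... handling of compaction_failed()/made_progress() elided ... */

	/*
	 * Make sure compaction wasn't deferred or didn't bail out early
	 * due to lock contention before we declare that we should give
	 * up. But do not retry if the given zonelist is not suitable for
	 * compaction.
	 */
	if (compaction_withdrawn(compact_result))
		return compaction_zonelist_suitable(ac, order, alloc_flags);

	return false;
}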
[vbabka@suse.cz: fix classzone_idx vs. high_zoneidx usage in compaction_zonelist_suitable]
[akpm@linux-foundation.org: fix it for Mel's mm-page_alloc-remove-field-from-alloc_context.patch]
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/compaction.h (231 lines, 6.2 KiB, C):
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NOT_SUITABLE_ZONE,
	/*
	 * compaction didn't start as it was not possible or direct reclaim
	 * was more suitable
	 */
	COMPACT_SKIPPED,
	/* compaction didn't start as it was deferred due to past failures */
	COMPACT_DEFERRED,

	/* compaction not active last round */
	COMPACT_INACTIVE = COMPACT_DEFERRED,

	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NO_SUITABLE_PAGE,
	/* compaction should continue to another pageblock */
	COMPACT_CONTINUE,

	/*
	 * The full zone was scanned but compaction wasn't successful
	 * for any suitable pages.
	 */
	COMPACT_COMPLETE,
	/*
	 * direct compaction has scanned part of the zone but wasn't
	 * successful for any suitable pages.
	 */
	COMPACT_PARTIAL_SKIPPED,

	/* compaction terminated prematurely due to lock contention */
	COMPACT_CONTENDED,

	/*
	 * direct compaction partially compacted a zone and there might be
	 * suitable pages
	 */
	COMPACT_PARTIAL,
};

/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2

struct alloc_context; /* in mm/internal.h */

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
		unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
		unsigned int alloc_flags, int classzone_idx);

extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
				bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);

/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
{
	/*
	 * Even though this might sound confusing, this in fact tells us
	 * that the compaction successfully isolated and migrated some
	 * pageblocks.
	 */
	if (result == COMPACT_PARTIAL)
		return true;

	return false;
}

/* Compaction has failed and it doesn't make much sense to keep retrying. */
static inline bool compaction_failed(enum compact_result result)
{
	/* All zones were scanned completely and still no result. */
	if (result == COMPACT_COMPLETE)
		return true;

	return false;
}

/*
 * Compaction has backed off for some reason. It might be throttling or
 * lock contention. Retrying is still worthwhile.
 */
static inline bool compaction_withdrawn(enum compact_result result)
{
	/*
	 * Compaction backed off due to watermark checks for order-0,
	 * so the regular reclaim has to try harder and reclaim something.
	 */
	if (result == COMPACT_SKIPPED)
		return true;

	/*
	 * If compaction is deferred for high-order allocations, it is
	 * because sync compaction recently failed. If this is the case
	 * and the caller requested a THP allocation, we do not want
	 * to heavily disrupt the system, so we fail the allocation
	 * instead of entering direct reclaim.
	 */
	if (result == COMPACT_DEFERRED)
		return true;

	/*
	 * If compaction in async mode encounters contention or blocks a
	 * higher priority task, we back off early rather than cause stalls.
	 */
	if (result == COMPACT_CONTENDED)
		return true;

	/*
	 * Page scanners have met but we haven't scanned full zones, so
	 * this is in fact a back off.
	 */
	if (result == COMPACT_PARTIAL_SKIPPED)
		return true;

	return false;
}

/*
 * True if at least one zone in the given zonelist could unblock
 * __compaction_suitable() if more memory got reclaimed.
 */
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
					int alloc_flags);

extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);

#else
static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order, int alloc_flags,
			const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline enum compact_result compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

static inline bool compaction_made_progress(enum compact_result result)
{
	return false;
}

static inline bool compaction_failed(enum compact_result result)
{
	return false;
}

static inline bool compaction_withdrawn(enum compact_result result)
{
	return true;
}

static inline int kcompactd_run(int nid)
{
	return 0;
}
static inline void kcompactd_stop(int nid)
{
}

static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */