8fb74b9fb2
mm: compaction: partially revert capture of suitable high-order page

Eric Wong reported on 3.7 and 3.8-rc2 that ppoll() got stuck when waiting
for POLLIN on a local TCP socket.  It was easier to trigger if there was
disk IO and dirty pages at the same time, and he bisected it to commit
1fb3f8ca0e ("mm: compaction: capture a suitable high-order page
immediately when it is made available").

The intention of that patch was to improve high-order allocations under
memory pressure after changes made to reclaim in 3.6 drastically hurt THP
allocations, but the approach was flawed.  For Eric, the problem was that
page->pfmemalloc was not being cleared for captured pages, leading to a
poor interaction with swap-over-NFS support that caused the packets to be
dropped.  However, I identified a few more problems with the patch,
including the fact that it can increase contention on zone->lock in some
cases, which could result in async direct compaction being aborted early.

In retrospect the capture patch took the wrong approach.  What it should
have done is mark the pageblock being migrated as MIGRATE_ISOLATE if it
was allocating for THP and avoided races that way.  While the patch was
shown to improve allocation success rates at the time, the benefit is
marginal given the relative complexity, and it should be revisited from
scratch in the context of the other reclaim-related changes that have
taken place since the patch was first written and tested.

This patch partially reverts commit 1fb3f8ca0e ("mm: compaction: capture
a suitable high-order page immediately when it is made available").

Reported-and-tested-by: Eric Wong <normalperson@yhbt.net>
Tested-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
125 lines
3.6 KiB
C
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED 0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE 1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL 2
/* The full zone was compacted */
#define COMPACT_COMPLETE 3

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync, bool *contended);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_limit compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	return COMPACT_CONTINUE;
}

static inline int compact_pgdat(pg_data_t *pgdat, int order)
{
	return COMPACT_CONTINUE;
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */
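A worked example of the deferral arithmetic above: zone->compact_defer_shift
starts at 0, so nothing is skipped until the first failure.  Each call to
defer_compaction() resets compact_considered and bumps the shift, doubling
the back-off window up to the COMPACT_MAX_DEFER_SHIFT cap of 6:

	consecutive failures    1    2    3    4    5    6 and beyond
	compact_defer_shift     1    2    3    4    5    6 (capped)
	requests skipped        1    3    7   15   31   63

With compact_defer_shift == s, compaction_deferred() returns true for the
first 2^s - 1 requests after the failure and false on request 2^s, which
attempts compaction again.  The back-off only applies to allocation orders
at or above the smallest order that failed (zone->compact_order_failed);
smaller orders are never deferred.  Once the cap is reached and
compact_considered has saturated at 64, compaction_restarting() reports
true, which callers can use as a signal to reset cached per-zone state
such as the pageblock-skip information behind reset_isolation_suitable().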