mm: page_alloc: generalize the dirty balance reserve
The dirty balance reserve that dirty throttling has to consider is merely memory not available to userspace allocations. There is nothing writeback-specific about it. Generalize the name so that it's reusable outside of that context.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in: commit a8d0143730 (parent c20cd45eb0)
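Before the diff, a quick illustration of the computation being generalized. The following is a minimal user-space sketch, not kernel code: the counters are hypothetical stand-in values for the vmstat state that global_dirtyable_memory() actually reads, but the arithmetic mirrors the post-patch logic.

#include <stdio.h>

/* Hypothetical stand-in values for the kernel's vmstat counters. */
static unsigned long nr_free_pages      = 1000000;
static unsigned long nr_inactive_file   = 200000;
static unsigned long nr_active_file     = 150000;
static unsigned long totalreserve_pages = 50000;

/*
 * Mirrors global_dirtyable_memory() after this patch: pages reserved
 * for the kernel are subtracted from free memory before the file
 * pages are added, so reclaim never has to clean pages just to
 * refill those reserves.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x = nr_free_pages;

	x -= (totalreserve_pages < x) ? totalreserve_pages : x; /* min() */
	x += nr_inactive_file;
	x += nr_active_file;
	return x;
}

int main(void)
{
	/* 1000000 - 50000 + 200000 + 150000 = 1300000 dirtyable pages */
	printf("dirtyable pages: %lu\n", global_dirtyable_memory());
	return 0;
}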
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -356,10 +356,10 @@ struct zone {
 	struct per_cpu_pageset __percpu *pageset;
 
 	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
+	 * This is a per-zone reserve of pages that are not available
+	 * to userspace allocations.
 	 */
-	unsigned long dirty_balance_reserve;
+	unsigned long totalreserve_pages;
 
 #ifndef CONFIG_SPARSEMEM
 	/*
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -287,7 +287,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	unsigned long nr_pages;
 
 	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
-	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	nr_pages -= min(nr_pages, zone->totalreserve_pages);
 
 	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
@@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void)
 	unsigned long x;
 
 	x = global_page_state(NR_FREE_PAGES);
-	x -= min(x, dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	x -= min(x, totalreserve_pages);
 
 	x += global_page_state(NR_INACTIVE_FILE);
 	x += global_page_state(NR_ACTIVE_FILE);
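Both helpers feed directly into the dirty limits: the writeback code applies vm.dirty_ratio to the dirtyable total, so with the kernel's default ratio of 20 the sketch above (1,300,000 dirtyable pages) would allow roughly 260,000 dirty pages before writers are throttled.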
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory. This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
-
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
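Note that the global totalreserve_pages counter already existed and was already kept current by calculate_totalreserve_pages(); the patch drops the parallel dirty_balance_reserve bookkeeping in favor of it. The only genuinely new state is the per-zone totalreserve_pages field added to struct zone above, stored in the hunk that follows.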
@@ -5942,20 +5935,12 @@ static void calculate_totalreserve_pages(void)
 
 			if (max > zone->managed_pages)
 				max = zone->managed_pages;
+
+			zone->totalreserve_pages = max;
+
 			reserve_pages += max;
-			/*
-			 * Lowmem reserves are not available to
-			 * GFP_HIGHUSER page cache allocations and
-			 * kswapd tries to balance zones to their high
-			 * watermark. As a result, neither should be
-			 * regarded as dirtyable memory, to prevent a
-			 * situation where reclaim has to clean pages
-			 * in order to balance the zones.
-			 */
-			zone->dirty_balance_reserve = max;
 		}
 	}
-	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
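For context, the max being stored comes from the earlier, unchanged part of the same loop: the largest lowmem reserve of the zone plus its high watermark, capped at managed_pages. A stand-alone sketch of that per-zone step, using a hypothetical plain-C struct in place of the real struct zone:

#define MAX_NR_ZONES 4

/* Hypothetical stand-in for the handful of struct zone fields involved. */
struct zone_sketch {
	unsigned long lowmem_reserve[MAX_NR_ZONES];
	unsigned long high_wmark;	/* high_wmark_pages(zone) */
	unsigned long managed_pages;
	unsigned long totalreserve_pages;
};

/* Mirrors the per-zone body of calculate_totalreserve_pages(). */
static unsigned long zone_reserve(struct zone_sketch *zone)
{
	unsigned long max = 0;
	int i;

	/* Find the largest lowmem reserve of this zone. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		if (zone->lowmem_reserve[i] > max)
			max = zone->lowmem_reserve[i];

	/* kswapd balances the zone up to its high watermark. */
	max += zone->high_wmark;

	/* The reserve can never exceed what the zone actually manages. */
	if (max > zone->managed_pages)
		max = zone->managed_pages;

	zone->totalreserve_pages = max;
	return max;
}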