mm/page_alloc: convert zone_pcp_update() to rely on memory barriers instead of stop_machine()
zone_pcp_update()'s goal is to adjust the ->high and ->mark members of a
percpu pageset based on a zone's ->managed_pages.  We don't need to drain
the entire percpu pageset just to modify these fields.

This lets us avoid calling setup_pageset() (and the draining required to
call it) and instead allows simply setting the fields' values (with some
attention paid to memory barriers to prevent the relationship between
->batch and ->high from being thrown off).

This does change the behavior of zone_pcp_update() as the percpu pagesets
will not be drained when zone_pcp_update() is called (they will end up
being shrunk, not completely drained, later when a 0-order page is freed
in free_hot_cold_page()).

Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 998d39cb23
commit 0a647f3811
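The commit message above leans on memory barriers to keep a reader's view of
pcp->batch consistent with pcp->high while the two fields are rewritten
without a drain. A minimal sketch of that ordering, modeled on the
pageset_update()-style helper used by this patch series (the helper name and
exact barrier placement below are illustrative, not a verbatim quote of the
kernel source):

static void pcp_update_high_and_batch(struct per_cpu_pages *pcp,
				      unsigned long high, unsigned long batch)
{
	/*
	 * Drop to a fail-safe batch first so a concurrent free path never
	 * sees a batch larger than the (possibly shrinking) high value.
	 */
	pcp->batch = 1;
	smp_wmb();

	/* Publish the new high watermark before the real batch value. */
	pcp->high = high;
	smp_wmb();

	pcp->batch = batch;
}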
@@ -6085,33 +6085,18 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static int __meminit __zone_pcp_update(void *data)
-{
-	struct zone *zone = data;
-	int cpu;
-	unsigned long batch = zone_batchsize(zone), flags;
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_pageset *pset;
-		struct per_cpu_pages *pcp;
-
-		pset = per_cpu_ptr(zone->pageset, cpu);
-		pcp = &pset->pcp;
-
-		local_irq_save(flags);
-		if (pcp->count > 0)
-			free_pcppages_bulk(zone, pcp->count, pcp);
-		drain_zonestat(zone, pset);
-		setup_pageset(pset, batch);
-		local_irq_restore(flags);
-	}
-	return 0;
-}
-
 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
  * page high values need to be recalulated.
  */
 void __meminit zone_pcp_update(struct zone *zone)
 {
+	unsigned cpu;
+	unsigned long batch;
 	mutex_lock(&pcp_batch_high_lock);
-	stop_machine(__zone_pcp_update, zone, NULL);
+	batch = zone_batchsize(zone);
+	for_each_possible_cpu(cpu)
+		pageset_set_batch(per_cpu_ptr(zone->pageset, cpu), batch);
 	mutex_unlock(&pcp_batch_high_lock);
 }
 #endif
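For the "shrunk, not completely drained" behaviour the commit message points
at, the order-0 free path is the relevant consumer: once pcp->count climbs
past pcp->high, one batch of pages is returned to the buddy allocator.
Roughly, paraphrasing the tail of free_hot_cold_page() from this era of the
kernel (not a verbatim quote):

	pcp->count++;
	if (pcp->count >= pcp->high) {
		unsigned long batch = ACCESS_ONCE(pcp->batch);

		/*
		 * Push one batch back to the buddy allocator; the pageset
		 * shrinks gradually instead of being drained in one go.
		 */
		free_pcppages_bulk(zone, batch, pcp);
		pcp->count -= batch;
	}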