mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-02 11:56:40 +07:00)
702d1a6e07
When hotplug offlining happens on zone A, it starts marking freed pages as MIGRATE_ISOLATE in the buddy allocator to prevent further allocation. (MIGRATE_ISOLATE is an ironic type: the pages are apparently in the buddy allocator, yet we cannot allocate them.)

When a memory shortage happens during hotplug offlining, the current task starts to reclaim and wakes up kswapd. Kswapd checks the watermark and then goes back to sleep, because the current zone_watermark_ok_safe() does not take MIGRATE_ISOLATE free pages into account. The current task keeps reclaiming in the direct reclaim path without kswapd's help. The problem is that zone->all_unreclaimable is set only by kswapd, so the current task can loop forever, like below:

__alloc_pages_slowpath
restart:
        wake_all_kswapd
rebalance:
        __alloc_pages_direct_reclaim
                do_try_to_free_pages
                        if global_reclaim && !all_unreclaimable
                                return 1; /* It means we did did_some_progress */
        skip __alloc_pages_may_oom
        should_alloc_retry
                goto rebalance;

If we apply KOSAKI's patch[1], which does not depend on kswapd to set zone->all_unreclaimable, we can solve this problem by killing some task in the direct reclaim path. But it still does not wake up kswapd, which could remain a problem if another subsystem needs a GFP_ATOMIC allocation. So kswapd should take MIGRATE_ISOLATE into account when it calculates free pages BEFORE going to sleep.

This patch counts the number of MIGRATE_ISOLATE pageblocks, and zone_watermark_ok_safe() takes them into account if the system has such blocks (fortunately this is very rare, so the overhead is not a concern, and kswapd is never a hot path).

Copied/modified from Mel's quote:
"
Ideal solution would be "allocating" the pageblock. It would keep the
free space accounting as it is but historically, memory hotplug didn't
allocate pages because it would be difficult to detect if a pageblock
was isolated or if part of some balloon. Allocating just full pageblocks
would work around this, However, it would play very badly with CMA.
"

[1] http://lkml.org/lkml/2012/6/14/74

[akpm@linux-foundation.org: simplify nr_zone_isolate_freepages(), rework zone_watermark_ok_safe() comment, simplify set_pageblock_isolate() and restore_pageblock_isolate()]
[akpm@linux-foundation.org: fix CONFIG_MEMORY_ISOLATION=n build]
Signed-off-by: Minchan Kim <minchan@kernel.org>
Suggested-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tested-by: Aaditya Kumar <aaditya.kumar.30@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
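The file shown below contains only the page-isolation side of this change (the zone->nr_pageblock_isolate bookkeeping). The other half that the commit message describes lives in mm/page_alloc.c: zone_watermark_ok_safe() subtracts the free pages hidden in MIGRATE_ISOLATE pageblocks so kswapd does not go back to sleep during offlining. The following is only a rough sketch of that allocator-side change, assuming the existing __zone_watermark_ok() helper that takes an explicit free-page count; the exact hunk in the patch may differ.

#ifdef CONFIG_MEMORY_ISOLATION
/*
 * Upper-bound estimate of the free pages sitting in MIGRATE_ISOLATE
 * pageblocks: every isolated pageblock is counted as fully free.
 * Exact accounting is not needed; over-estimating only makes kswapd
 * slightly more eager, which is the safe direction during offlining.
 */
static unsigned long nr_zone_isolate_freepages(struct zone *zone)
{
        return zone->nr_pageblock_isolate * pageblock_nr_pages;
}
#else
static unsigned long nr_zone_isolate_freepages(struct zone *zone)
{
        return 0;
}
#endif

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                            int classzone_idx, int alloc_flags)
{
        long free_pages = zone_page_state(z, NR_FREE_PAGES);

        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

        /*
         * Discount the pages parked in MIGRATE_ISOLATE pageblocks so that
         * kswapd does not go back to sleep while an offline operation is
         * holding most of the "free" memory in unallocatable pageblocks.
         */
        free_pages -= nr_zone_isolate_freepages(z);

        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                   free_pages);
}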
236 lines
6.2 KiB
C
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
        if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
                return;

        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
        struct zone *zone = page_zone(page);
        if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
                return;

        BUG_ON(zone->nr_pageblock_isolate <= 0);
        set_pageblock_migratetype(page, migratetype);
        zone->nr_pageblock_isolate--;
}

int set_migratetype_isolate(struct page *page)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs, these
         * pages reported as "can be isolated" should be isolated (freed)
         * by the balloon driver through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found))
                ret = 0;

        /*
         * immobile means "not-on-lru" pages. If immobile is larger than
         * removable-by-driver pages reported by the notifier, we'll fail.
         */

out:
        if (!ret) {
                set_pageblock_isolate(page);
                move_freepages_block(zone, page, MIGRATE_ISOLATE);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags;
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        move_freepages_block(zone, page, migratetype);
        restore_pageblock_isolate(page, migratetype);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;
        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages, and pages freed in
 * the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && set_migratetype_isolate(page)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;
        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}
/*
 * Test whether all pages in the range are free (which here means isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        pfn += 1 << page_order(page);
                else if (page_count(page) == 0 &&
                         page_private(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free pages
         * is not necessarily aligned to pageblock_nr_pages.
         * Check the pageblock migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}
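For context, the functions above are consumed by the memory hotplug offline path (and by CMA). A hypothetical, heavily simplified caller, assuming a pageblock-aligned PFN range, might look like the sketch below; the real flow lives in offline_pages() in mm/memory_hotplug.c and includes migration, retry, and zone bookkeeping that is omitted here.

/*
 * Hypothetical, simplified caller: take a pageblock-aligned PFN range
 * offline.  Migration and the memmap/zone bookkeeping that the real
 * offline path performs are left out.
 */
static int offline_range(unsigned long start_pfn, unsigned long end_pfn)
{
        /* 1. Stop the buddy allocator from handing out pages in the range. */
        if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE))
                return -EBUSY;  /* some pageblock could not be isolated */

        /* 2. Migrate whatever is still in use out of the range (not shown). */

        /* 3. Verify every page in the range is free and MIGRATE_ISOLATE. */
        if (test_pages_isolated(start_pfn, end_pfn)) {
                /* Pages are still pinned: give the pageblocks back. */
                undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
                return -EBUSY;
        }

        /* 4. Success: the range can now be removed from its zone. */
        return 0;
}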