Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 13:20:52 +07:00

Commit 21dc7e0236
It is pointless to migrate hugetlb memory as part of memory compaction if the hugetlb size is equal to the pageblock order: no defragmentation occurs in this condition. It is also pointless for the freeing scanner to scan a pageblock where a hugetlb page is pinned. Unconditionally skip these pageblocks, and do so persistently so that they are not rescanned until it is observed that these hugepages are no longer pinned.

It would also be possible to do this by involving the hugetlb subsystem in marking pageblocks to no longer be skipped when their hugetlb pages are freed. This is a simple solution that doesn't involve any additional subsystems in pageblock skip manipulation.

[rientjes@google.com: fix build]
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708201734390.117182@chino.kir.corp.google.com
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708151639130.106658@chino.kir.corp.google.com
Signed-off-by: David Rientjes <rientjes@google.com>
Tested-by: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
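A minimal sketch of the idea, assuming mainline helpers such as PageHuge(), compound_head() and compound_order() together with the set_pageblock_skip() macro declared in the header below; the helper name skip_pinned_hugetlb_block() is hypothetical and this is not the patch itself:

/* Sketch only: persistently mark a pageblock fully occupied by a hugetlb page. */
static bool skip_pinned_hugetlb_block(struct page *page)
{
	struct page *head = compound_head(page);

	/* Only worthwhile when the hugetlb size covers the whole pageblock. */
	if (PageHuge(head) && compound_order(head) == pageblock_order) {
		set_pageblock_skip(head);	/* persists until skip hints are reset */
		return true;
	}
	return false;
}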
113 lines · 3.3 KiB · C
/*
 * Macros for manipulating and testing flags related to a
 * pageblock_nr_pages number of pages.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Original author, Mel Gorman
 * Major cleanups and reduction of bit operations, Andy Whitcroft
 */
#ifndef PAGEBLOCK_FLAGS_H
#define PAGEBLOCK_FLAGS_H

#include <linux/types.h>

/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
	PB_migrate,
	PB_migrate_end = PB_migrate + 3 - 1,
			/* 3 bits required for migrate types */
	PB_migrate_skip,/* If set the block is skipped by compaction */

	/*
	 * Assume the bits will always align on a word. If this assumption
	 * changes then get/set pageblock needs updating.
	 */
	NR_PAGEBLOCK_BITS
};

#ifdef CONFIG_HUGETLB_PAGE

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Huge page sizes are variable */
extern unsigned int pageblock_order;

#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/* Huge pages are a constant size */
#define pageblock_order		HUGETLB_PAGE_ORDER

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

#else /* CONFIG_HUGETLB_PAGE */

/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
#define pageblock_order		(MAX_ORDER-1)

#endif /* CONFIG_HUGETLB_PAGE */

#define pageblock_nr_pages	(1UL << pageblock_order)

/* Forward declaration */
struct page;

unsigned long get_pfnblock_flags_mask(struct page *page,
				unsigned long pfn,
				unsigned long end_bitidx,
				unsigned long mask);

void set_pfnblock_flags_mask(struct page *page,
				unsigned long flags,
				unsigned long pfn,
				unsigned long end_bitidx,
				unsigned long mask);

/* Declarations for getting and setting flags. See mm/page_alloc.c */
#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			end_bitidx,					\
			(1 << (end_bitidx - start_bitidx + 1)) - 1)
#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
	set_pfnblock_flags_mask(page, flags, page_to_pfn(page),		\
			end_bitidx,					\
			(1 << (end_bitidx - start_bitidx + 1)) - 1)
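
/*
 * Illustrative use only (not part of this header): mm code reads a
 * pageblock's migrate type through these wrappers, roughly as
 *
 *	get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 *
 * which expands to a get_pfnblock_flags_mask() call with the 3-bit
 * migrate-type mask, (1 << 3) - 1.
 */
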
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
			get_pageblock_flags_group(page, PB_migrate_skip,     \
							PB_migrate_skip)
#define clear_pageblock_skip(page) \
			set_pageblock_flags_group(page, 0, PB_migrate_skip,  \
							PB_migrate_skip)
#define set_pageblock_skip(page) \
			set_pageblock_flags_group(page, 1, PB_migrate_skip,  \
							PB_migrate_skip)
#else
static inline bool get_pageblock_skip(struct page *page)
{
	return false;
}
static inline void clear_pageblock_skip(struct page *page)
{
}
static inline void set_pageblock_skip(struct page *page)
{
}
#endif /* CONFIG_COMPACTION */
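
/*
 * Illustrative caller pattern (not part of this header): compaction's
 * scanners test the skip bit before isolating pages from a pageblock and
 * move on when it is set, e.g.
 *
 *	if (get_pageblock_skip(page))
 *		continue;
 *
 * while clear_pageblock_skip() is used when the skip hints are reset.
 */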
#endif /* PAGEBLOCK_FLAGS_H */