mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit dcda9b0471
__GFP_REPEAT was designed to allow retry-but-eventually-fail semantics in the page allocator. This has been true, but only for allocation requests larger than PAGE_ALLOC_COSTLY_ORDER; it has always been ignored for smaller sizes. This is a bit unfortunate because there is no way to express the same semantics for those requests, and they are considered too important to fail, so they might end up looping in the page allocator forever, similarly to GFP_NOFAIL requests.

Now that the whole tree has been cleaned up and accidental or misled usage of the __GFP_REPEAT flag has been removed for !costly requests, we can give the original flag a better name and, more importantly, a more useful semantic. Let's rename it to __GFP_RETRY_MAYFAIL, which tells the user that the allocator will try really hard but there is no promise of success. This works independently of the order and overrides the default allocator behavior. Page allocator users have several levels of guarantee vs. cost options (take GFP_KERNEL as an example):

 - GFP_KERNEL & ~__GFP_RECLAIM - optimistic allocation without _any_ attempt to free memory at all. The most lightweight mode, which doesn't even kick the background reclaim. Should be used carefully because it might deplete the memory and the next user might hit the more aggressive reclaim.

 - GFP_KERNEL & ~__GFP_DIRECT_RECLAIM (or GFP_NOWAIT) - optimistic allocation without any attempt to free memory from the current context, but it can wake kswapd to reclaim memory if the zone is below the low watermark. Can be used from either atomic contexts or when the request is a performance optimization and there is another fallback for a slow path.

 - (GFP_KERNEL|__GFP_HIGH) & ~__GFP_DIRECT_RECLAIM (aka GFP_ATOMIC) - non-sleeping allocation with an expensive fallback so it can access some portion of memory reserves. Usually used from interrupt/bh context with an expensive slow-path fallback.

 - GFP_KERNEL - both background and direct reclaim are allowed and the _default_ page allocator behavior is used. That means that !costly allocation requests are basically nofail, but there is no guarantee of that behavior, so failures have to be checked properly by callers (e.g. an OOM killer victim is allowed to fail currently).

 - GFP_KERNEL | __GFP_NORETRY - overrides the default allocator behavior, and all allocation requests fail early rather than cause disruptive reclaim (one round of reclaim in this implementation). The OOM killer is not invoked.

 - GFP_KERNEL | __GFP_RETRY_MAYFAIL - overrides the default allocator behavior, and all allocation requests try really hard. The request will fail if the reclaim cannot make any progress. The OOM killer won't be triggered.

 - GFP_KERNEL | __GFP_NOFAIL - overrides the default allocator behavior, and all allocation requests will loop endlessly until they succeed. This might be really dangerous, especially for larger orders.

Existing users of __GFP_REPEAT are changed to __GFP_RETRY_MAYFAIL because they already had this semantic. No new users are added. __alloc_pages_slowpath is changed to bail out for __GFP_RETRY_MAYFAIL if there is no progress and we have already passed the OOM point. This means that all the reclaim opportunities have been exhausted except the most disruptive one (the OOM killer), and a user-defined fallback behavior is more sensible than to keep retrying in the page allocator.
[akpm@linux-foundation.org: fix arch/sparc/kernel/mdesc.c]
[mhocko@suse.com: semantic fix]
Link: http://lkml.kernel.org/r/20170626123847.GM11534@dhcp22.suse.cz
[mhocko@kernel.org: address other thing spotted by Vlastimil]
Link: http://lkml.kernel.org/r/20170626124233.GN11534@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20170623085345.11304-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alex Belits <alex.belits@cavium.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: NeilBrown <neilb@suse.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
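To illustrate the new semantic, here is a minimal sketch (not part of this patch; the helper name, the size handling, and the vmalloc() fallback are made up for illustration):

        /* sketch only; needs <linux/slab.h> and <linux/vmalloc.h> */
        static void *big_buffer_alloc(size_t size)
        {
                void *buf;

                /* try really hard, but fail rather than trigger the OOM killer */
                buf = kmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
                if (!buf)
                        buf = vmalloc(size);    /* caller-defined fallback */
                return buf;
        }

The point is the caller-defined fallback: once __GFP_RETRY_MAYFAIL gives up, the caller decides what to do next instead of the page allocator retrying indefinitely.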
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 * GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 */
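/*
 * Illustrative example (not part of the original file): with the
 * ordering below, GFP_KERNEL | __GFP_ZERO is printed as
 * "GFP_KERNEL|__GFP_ZERO": the composite GFP_KERNEL mask matches
 * first and its bits are consumed, so only the leftover __GFP_ZERO
 * bit is shown by itself.
 */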

#define __def_gfpflag_names \
        {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
        {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
        {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
        {(unsigned long)GFP_USER, "GFP_USER"}, \
        {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
        {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
        {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
        {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
        {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
        {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
        {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
        {(unsigned long)GFP_DMA, "GFP_DMA"}, \
        {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
        {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
        {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
        {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
        {(unsigned long)__GFP_IO, "__GFP_IO"}, \
        {(unsigned long)__GFP_FS, "__GFP_FS"}, \
        {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
        {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
        {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
        {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
        {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
        {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
        {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
        {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
        {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
        {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
        {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
        {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
        {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
        {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
        {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
        {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
        {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
        {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
        {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}

#define show_gfp_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_gfpflag_names \
        ) : "none"
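/*
 * A sketch of typical use (the event and its gfp_flags field are
 * hypothetical, but this is how tracepoints consume the table):
 *
 *      TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
 *
 * which prints e.g. "GFP_KERNEL|__GFP_ZERO", or "none" when no flag
 * bits are set.
 */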

#ifdef CONFIG_MMU
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_MLOCK(flag,string)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_UNCACHED(flag,string)
#endif

#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif

#define __def_pageflag_names \
        {1UL << PG_locked, "locked" }, \
        {1UL << PG_waiters, "waiters" }, \
        {1UL << PG_error, "error" }, \
        {1UL << PG_referenced, "referenced" }, \
        {1UL << PG_uptodate, "uptodate" }, \
        {1UL << PG_dirty, "dirty" }, \
        {1UL << PG_lru, "lru" }, \
        {1UL << PG_active, "active" }, \
        {1UL << PG_slab, "slab" }, \
        {1UL << PG_owner_priv_1, "owner_priv_1" }, \
        {1UL << PG_arch_1, "arch_1" }, \
        {1UL << PG_reserved, "reserved" }, \
        {1UL << PG_private, "private" }, \
        {1UL << PG_private_2, "private_2" }, \
        {1UL << PG_writeback, "writeback" }, \
        {1UL << PG_head, "head" }, \
        {1UL << PG_mappedtodisk, "mappedtodisk" }, \
        {1UL << PG_reclaim, "reclaim" }, \
        {1UL << PG_swapbacked, "swapbacked" }, \
        {1UL << PG_unevictable, "unevictable" } \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" )

#define show_page_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_pageflag_names \
        ) : "none"

#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY, "mappedcopy" }
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
#endif

#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" }
#else
#define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" }
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif

#define __def_vmaflag_names \
        {VM_READ, "read" }, \
        {VM_WRITE, "write" }, \
        {VM_EXEC, "exec" }, \
        {VM_SHARED, "shared" }, \
        {VM_MAYREAD, "mayread" }, \
        {VM_MAYWRITE, "maywrite" }, \
        {VM_MAYEXEC, "mayexec" }, \
        {VM_MAYSHARE, "mayshare" }, \
        {VM_GROWSDOWN, "growsdown" }, \
        {VM_UFFD_MISSING, "uffd_missing" }, \
        {VM_PFNMAP, "pfnmap" }, \
        {VM_DENYWRITE, "denywrite" }, \
        {VM_UFFD_WP, "uffd_wp" }, \
        {VM_LOCKED, "locked" }, \
        {VM_IO, "io" }, \
        {VM_SEQ_READ, "seqread" }, \
        {VM_RAND_READ, "randread" }, \
        {VM_DONTCOPY, "dontcopy" }, \
        {VM_DONTEXPAND, "dontexpand" }, \
        {VM_LOCKONFAULT, "lockonfault" }, \
        {VM_ACCOUNT, "account" }, \
        {VM_NORESERVE, "noreserve" }, \
        {VM_HUGETLB, "hugetlb" }, \
        __VM_ARCH_SPECIFIC_1 , \
        __VM_ARCH_SPECIFIC_2 , \
        {VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
        {VM_MIXEDMAP, "mixedmap" }, \
        {VM_HUGEPAGE, "hugepage" }, \
        {VM_NOHUGEPAGE, "nohugepage" }, \
        {VM_MERGEABLE, "mergeable" }

#define show_vma_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_vmaflag_names \
        ) : "none"

#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS \
        EM( COMPACT_SKIPPED, "skipped") \
        EM( COMPACT_DEFERRED, "deferred") \
        EM( COMPACT_CONTINUE, "continue") \
        EM( COMPACT_SUCCESS, "success") \
        EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
        EM( COMPACT_COMPLETE, "complete") \
        EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
        EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
        EMe(COMPACT_CONTENDED, "contended")

/* High-level compaction status feedback */
#define COMPACTION_FAILED 1
#define COMPACTION_WITHDRAWN 2
#define COMPACTION_PROGRESS 3

#define compact_result_to_feedback(result) \
({ \
        enum compact_result __result = result; \
        (compaction_failed(__result)) ? COMPACTION_FAILED : \
                (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
})
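/*
 * Illustrative mapping (an assumption based on compaction_failed() and
 * compaction_withdrawn() in <linux/compaction.h>): COMPACT_COMPLETE,
 * i.e. the whole zone was scanned without satisfying the request, is
 * reported as COMPACTION_FAILED; results such as COMPACT_SKIPPED or
 * COMPACT_DEFERRED count as COMPACTION_WITHDRAWN; anything else is
 * COMPACTION_PROGRESS.
 */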

#define COMPACTION_FEEDBACK \
        EM(COMPACTION_FAILED, "failed") \
        EM(COMPACTION_WITHDRAWN, "withdrawn") \
        EMe(COMPACTION_PROGRESS, "progress")

#define COMPACTION_PRIORITY \
        EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \
        EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \
        EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE \
        IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
        IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
        EM (ZONE_NORMAL, "Normal") \
        IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM, "HighMem")) \
        EMe(ZONE_MOVABLE, "Movable")

#define LRU_NAMES \
        EM (LRU_INACTIVE_ANON, "inactive_anon") \
        EM (LRU_ACTIVE_ANON, "active_anon") \
        EM (LRU_INACTIVE_FILE, "inactive_file") \
        EM (LRU_ACTIVE_FILE, "active_file") \
        EMe(LRU_UNEVICTABLE, "unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define EMe(a, b) TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK are defines not enums. Not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b) {a, b},
#define EMe(a, b) {a, b}
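/*
 * Illustrative expansion (not part of the original file): under the
 * first pair of definitions above, LRU_NAMES expands to
 *
 *      TRACE_DEFINE_ENUM(LRU_INACTIVE_ANON); ... TRACE_DEFINE_ENUM(LRU_UNEVICTABLE);
 *
 * which exports the enum values to userspace; after the redefinition,
 * the same LRU_NAMES expands to the table
 *
 *      {LRU_INACTIVE_ANON, "inactive_anon"}, ..., {LRU_UNEVICTABLE, "unevictable"}
 *
 * ready to be passed to __print_symbolic() inside a TP_printk().
 */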