Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-21 16:07:57 +07:00
commit d2cd9ede6e

Introduce MADV_WIPEONFORK semantics, which result in a VMA being empty in
the child process after fork.  This differs from MADV_DONTFORK in one
important way.

If a child process accesses memory that was MADV_WIPEONFORK, it will get
zeroes.  The address ranges are still valid, they are just empty.

If a child process accesses memory that was MADV_DONTFORK, it will get a
segmentation fault, since those address ranges are no longer valid in the
child after fork.

Since MADV_DONTFORK also seems to be used to allow very large programs to
fork in systems with strict memory overcommit restrictions, changing the
semantics of MADV_DONTFORK might break existing programs.

MADV_WIPEONFORK only works on private, anonymous VMAs.

The use case is libraries that store or cache information, and want to
know that they need to regenerate it in the child process after fork.

Examples of this would be:
- systemd/pulseaudio API checks (fail after fork) (replacing a getpid
  check, which is too slow without a PID cache)
- PKCS#11 API reinitialization check (mandated by specification)
- glibc's upcoming PRNG (reseed after fork)
- OpenSSL PRNG (reseed after fork)

The security benefits of a forking server having a re-initialized PRNG in
every child process are pretty obvious.  However, due to libraries having
all kinds of internal state, and programs getting compiled with many
different versions of each library, it is unreasonable to expect calling
programs to re-initialize everything manually after fork.

A further complication is the proliferation of clone flags, programs
bypassing glibc's functions to call clone directly, and programs calling
unshare, causing the glibc pthread_atfork hook to not get called.

It would be better to have the kernel take care of this automatically.

The patch also adds MADV_KEEPONFORK, to undo the effects of a prior
MADV_WIPEONFORK.

This is similar to the OpenBSD minherit syscall with MAP_INHERIT_ZERO:

    https://man.openbsd.org/minherit.2

[akpm@linux-foundation.org: numerically order arch/parisc/include/uapi/asm/mman.h #defines]
Link: http://lkml.kernel.org/r/20170811212829.29186-3-riel@redhat.com
Signed-off-by: Rik van Riel <riel@redhat.com>
Reported-by: Florian Weimer <fweimer@redhat.com>
Reported-by: Colm MacCártaigh <colm@allcosts.net>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Kees Cook <keescook@chromium.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Drewry <wad@chromium.org>
Cc: <linux-api@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
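
For illustration only (not part of the patch above): a minimal userspace
sketch of the semantics just described.  MADV_WIPEONFORK is the flag this
series introduces; the numeric fallback and the 4096-byte length below are
assumptions made for the sketch (the common asm-generic value and one page),
not portable definitions.

/* wipeonfork_demo.c -- illustrative sketch, not from the patch */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18      /* asm-generic value; some arches differ */
#endif

int main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "parent state");

        /* Only valid on private anonymous VMAs; older kernels return EINVAL. */
        if (madvise(p, 4096, MADV_WIPEONFORK))
                perror("madvise(MADV_WIPEONFORK)");

        if (fork() == 0) {
                /* The range is still mapped, but it reads back as zeroes. */
                printf("child sees:  \"%s\"\n", p);
                _exit(0);
        }

        wait(NULL);
        printf("parent sees: \"%s\"\n", p);     /* still "parent state" */
        return 0;
}

MADV_KEEPONFORK, also added by the series, undoes a prior MADV_WIPEONFORK
and restores normal copy-on-write inheritance for the range.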
266 lines, 8.4 KiB, C
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>

/*
 * The order of these masks is important. Matching masks will be seen
 * first and the leftover flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 *  GFP_KERNEL|__GFP_HARDWALL
 *
 * Thus most bits set go first.
 */
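
/*
 * For reference (an illustration added here, not part of the upstream
 * comment; the definitions live in <linux/gfp.h>): GFP_KERNEL is
 * __GFP_RECLAIM | __GFP_IO | __GFP_FS, and GFP_USER is the same bits plus
 * __GFP_HARDWALL.  Because GFP_USER is listed before GFP_KERNEL below, a
 * GFP_USER mask prints as "GFP_USER" rather than as the leftover-bit
 * spelling shown above.
 */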

#define __def_gfpflag_names \
        {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
        {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
        {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
        {(unsigned long)GFP_USER, "GFP_USER"}, \
        {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
        {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
        {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
        {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
        {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
        {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
        {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
        {(unsigned long)GFP_DMA, "GFP_DMA"}, \
        {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
        {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
        {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
        {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
        {(unsigned long)__GFP_IO, "__GFP_IO"}, \
        {(unsigned long)__GFP_FS, "__GFP_FS"}, \
        {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
        {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
        {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
        {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
        {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
        {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
        {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
        {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
        {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
        {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
        {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
        {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
        {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
        {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
        {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
        {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
        {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
        {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
        {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}

#define show_gfp_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_gfpflag_names \
        ) : "none"

#ifdef CONFIG_MMU
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_MLOCK(flag,string)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_UNCACHED(flag,string)
#endif

#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif

#define __def_pageflag_names \
        {1UL << PG_locked, "locked" }, \
        {1UL << PG_waiters, "waiters" }, \
        {1UL << PG_error, "error" }, \
        {1UL << PG_referenced, "referenced" }, \
        {1UL << PG_uptodate, "uptodate" }, \
        {1UL << PG_dirty, "dirty" }, \
        {1UL << PG_lru, "lru" }, \
        {1UL << PG_active, "active" }, \
        {1UL << PG_slab, "slab" }, \
        {1UL << PG_owner_priv_1, "owner_priv_1" }, \
        {1UL << PG_arch_1, "arch_1" }, \
        {1UL << PG_reserved, "reserved" }, \
        {1UL << PG_private, "private" }, \
        {1UL << PG_private_2, "private_2" }, \
        {1UL << PG_writeback, "writeback" }, \
        {1UL << PG_head, "head" }, \
        {1UL << PG_mappedtodisk, "mappedtodisk" }, \
        {1UL << PG_reclaim, "reclaim" }, \
        {1UL << PG_swapbacked, "swapbacked" }, \
        {1UL << PG_unevictable, "unevictable" } \
IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \
IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \
IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
IF_HAVE_PG_IDLE(PG_young, "young" ) \
IF_HAVE_PG_IDLE(PG_idle, "idle" )

#define show_page_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_pageflag_names \
        ) : "none"

#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY, "mappedcopy" }
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif

#define __def_vmaflag_names \
        {VM_READ, "read" }, \
        {VM_WRITE, "write" }, \
        {VM_EXEC, "exec" }, \
        {VM_SHARED, "shared" }, \
        {VM_MAYREAD, "mayread" }, \
        {VM_MAYWRITE, "maywrite" }, \
        {VM_MAYEXEC, "mayexec" }, \
        {VM_MAYSHARE, "mayshare" }, \
        {VM_GROWSDOWN, "growsdown" }, \
        {VM_UFFD_MISSING, "uffd_missing" }, \
        {VM_PFNMAP, "pfnmap" }, \
        {VM_DENYWRITE, "denywrite" }, \
        {VM_UFFD_WP, "uffd_wp" }, \
        {VM_LOCKED, "locked" }, \
        {VM_IO, "io" }, \
        {VM_SEQ_READ, "seqread" }, \
        {VM_RAND_READ, "randread" }, \
        {VM_DONTCOPY, "dontcopy" }, \
        {VM_DONTEXPAND, "dontexpand" }, \
        {VM_LOCKONFAULT, "lockonfault" }, \
        {VM_ACCOUNT, "account" }, \
        {VM_NORESERVE, "noreserve" }, \
        {VM_HUGETLB, "hugetlb" }, \
        __VM_ARCH_SPECIFIC_1 , \
        {VM_WIPEONFORK, "wipeonfork" }, \
        {VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
        {VM_MIXEDMAP, "mixedmap" }, \
        {VM_HUGEPAGE, "hugepage" }, \
        {VM_NOHUGEPAGE, "nohugepage" }, \
        {VM_MERGEABLE, "mergeable" }

#define show_vma_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
        __def_vmaflag_names \
        ) : "none"

#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS \
        EM( COMPACT_SKIPPED, "skipped") \
        EM( COMPACT_DEFERRED, "deferred") \
        EM( COMPACT_CONTINUE, "continue") \
        EM( COMPACT_SUCCESS, "success") \
        EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
        EM( COMPACT_COMPLETE, "complete") \
        EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
        EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
        EMe(COMPACT_CONTENDED, "contended")

/* High-level compaction status feedback */
#define COMPACTION_FAILED 1
#define COMPACTION_WITHDRAWN 2
#define COMPACTION_PROGRESS 3

#define compact_result_to_feedback(result) \
({ \
        enum compact_result __result = result; \
        (compaction_failed(__result)) ? COMPACTION_FAILED : \
        (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
})

#define COMPACTION_FEEDBACK \
        EM(COMPACTION_FAILED, "failed") \
        EM(COMPACTION_WITHDRAWN, "withdrawn") \
        EMe(COMPACTION_PROGRESS, "progress")
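
/*
 * Illustrative use (an assumption, not something defined in this header):
 * a tracepoint can collapse a detailed compact_result into the three
 * feedback levels above and print it symbolically, e.g.
 *
 *   __print_symbolic(compact_result_to_feedback(__entry->result),
 *                    COMPACTION_FEEDBACK)
 */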

#define COMPACTION_PRIORITY \
        EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \
        EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \
        EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE \
        IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
        IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
        EM (ZONE_NORMAL, "Normal") \
        IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM, "HighMem")) \
        EMe(ZONE_MOVABLE, "Movable")

#define LRU_NAMES \
        EM (LRU_INACTIVE_ANON, "inactive_anon") \
        EM (LRU_ACTIVE_ANON, "active_anon") \
        EM (LRU_INACTIVE_FILE, "inactive_file") \
        EM (LRU_ACTIVE_FILE, "active_file") \
        EMe(LRU_UNEVICTABLE, "unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define EMe(a, b) TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK are defines not enums. Not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b) {a, b},
#define EMe(a, b) {a, b}
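
/*
 * Illustrative note (added here as an example, not part of the original
 * header): with the EM()/EMe() definitions directly above, each list now
 * expands to {value, "name"} pairs, so a trace header that includes this
 * file can print symbolic names with something like:
 *
 *   __print_symbolic(__entry->status, COMPACTION_STATUS)
 */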