Commit 6d50e60cd2 in a mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
If an anonymous mapping is not allowed to fault thp memory and then madvise(MADV_HUGEPAGE) is used after fault, khugepaged will never collapse this memory into thp memory.

This occurs because the madvise(2) handler for thp, hugepage_madvise(), clears VM_NOHUGEPAGE on the stack, and the change isn't stored in vma->vm_flags until the final action of madvise_behavior(). This causes khugepaged_enter_vma_merge() to be a no-op in hugepage_madvise() when the vma previously had VM_NOHUGEPAGE set.

Fix this by passing the correct vma flags to the khugepaged mm slot handler. There's no chance khugepaged can run on this vma until after madvise_behavior() returns, since we hold mm->mmap_sem.

It would be possible to clear VM_NOHUGEPAGE directly from vma->vm_flags in hugepage_madvise(), but I didn't want to introduce special-case behavior into madvise_behavior(); I think it's best to just let it always set vma->vm_flags itself.

Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Suleiman Souhlal <suleiman@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
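For illustration, here is a minimal sketch of the MADV_HUGEPAGE path described above. It is not the verbatim kernel diff: the body of hugepage_madvise() in mm/huge_memory.c is abbreviated and its error checks are omitted. The point of the fix is the argument to the final call: the handler must hand its updated on-stack flags to khugepaged_enter_vma_merge(), because vma->vm_flags still carries the stale VM_NOHUGEPAGE until madvise_behavior() stores the new value.

/* Sketch only: abbreviated from the shape of mm/huge_memory.c. */
int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
{
        switch (advice) {
        case MADV_HUGEPAGE:
                /* Update the caller's on-stack copy of the flags... */
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
                /*
                 * ...and register with khugepaged using that copy, not
                 * vma->vm_flags: the vma keeps VM_NOHUGEPAGE until
                 * madvise_behavior() stores the new flags, so testing
                 * vma->vm_flags here made this call a no-op whenever
                 * VM_NOHUGEPAGE had been set earlier.
                 */
                if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
                        return -ENOMEM;
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
                *vm_flags |= VM_NOHUGEPAGE;
                break;
        }
        return 0;
}

The header below reflects the matching interface: khugepaged_enter_vma_merge() takes the flags to test as an explicit vm_flags parameter rather than reading them from the vma.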
include/linux/khugepaged.h · 71 lines · 2.0 KiB · C
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched.h> /* MMF_VM_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                      unsigned long vm_flags);

#define khugepaged_enabled()                            \
        (transparent_hugepage_flags &                   \
         ((1<<TRANSPARENT_HUGEPAGE_FLAG) |              \
          (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()                             \
        (transparent_hugepage_flags &                   \
         (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()                           \
        (transparent_hugepage_flags &                   \
         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()                             \
        (transparent_hugepage_flags &                   \
         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
        if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
                return __khugepaged_enter(mm);
        return 0;
}

static inline void khugepaged_exit(struct mm_struct *mm)
{
        if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
                __khugepaged_exit(mm);
}

static inline int khugepaged_enter(struct vm_area_struct *vma,
                                   unsigned long vm_flags)
{
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                if ((khugepaged_always() ||
                     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
                    !(vm_flags & VM_NOHUGEPAGE))
                        if (__khugepaged_enter(vma->vm_mm))
                                return -ENOMEM;
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
        return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
                                   unsigned long vm_flags)
{
        return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                             unsigned long vm_flags)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */