mm: adjust FOLL_SPLIT for new refcounting
We need to prepare the kernel to allow transhuge pages to be mapped with
ptes too. We need to handle FOLL_SPLIT in follow_page_pte(). Also we use
split_huge_page() directly instead of split_huge_page_pmd();
split_huge_page_pmd() will be gone.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
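For orientation, the control flow this patch adds to follow_page_pte() is: take a
reference on the page, drop the PTE lock, lock the page, split it with
split_huge_page(), then retry the lookup. The stand-alone sketch below mirrors that
split-and-retry pattern with simplified stand-in types and helpers (fake_get_page(),
fake_split_huge_page(), etc.); it is illustrative only and is not the kernel
implementation.

#include <stdio.h>
#include <stdbool.h>

struct page {
	bool compound;	/* still part of a huge (compound) page? */
	int refcount;
};

/* Simplified stand-ins for get_page()/put_page()/lock_page()/unlock_page(). */
static void fake_get_page(struct page *p)    { p->refcount++; }
static void fake_put_page(struct page *p)    { p->refcount--; }
static void fake_lock_page(struct page *p)   { (void)p; }
static void fake_unlock_page(struct page *p) { (void)p; }

/* Pretend the split always succeeds: the page stops being compound. */
static int fake_split_huge_page(struct page *p)
{
	p->compound = false;
	return 0;
}

/*
 * Simplified analogue of the new follow_page_pte() logic: if the caller
 * asked for a split and the page is still compound, split it and retry
 * the lookup from the start.
 */
static struct page *lookup_with_split(struct page *p, bool want_split)
{
retry:
	if (want_split && p->compound) {
		int ret;

		fake_get_page(p);	/* hold a reference across the split */
		fake_lock_page(p);
		ret = fake_split_huge_page(p);
		fake_unlock_page(p);
		fake_put_page(p);
		if (ret)
			return NULL;	/* the kernel returns ERR_PTR(ret) */
		goto retry;		/* re-walk now that the page is split */
	}
	return p;
}

int main(void)
{
	struct page huge = { .compound = true, .refcount = 1 };
	struct page *res = lookup_with_split(&huge, true);

	printf("compound after lookup: %d, refcount: %d\n",
	       res->compound, res->refcount);
	return 0;
}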
parent 1f25fe20a7
commit 6742d293cb

 mm/gup.c | 71 lines changed
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -116,6 +116,19 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		}
 	}
 
+	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
+		int ret;
+		get_page(page);
+		pte_unmap_unlock(ptep, ptl);
+		lock_page(page);
+		ret = split_huge_page(page);
+		unlock_page(page);
+		put_page(page);
+		if (ret)
+			return ERR_PTR(ret);
+		goto retry;
+	}
+
 	if (flags & FOLL_GET)
 		get_page_foll(page);
 	if (flags & FOLL_TOUCH) {
@@ -220,27 +233,45 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 	}
 	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 		return no_page_table(vma, flags);
-	if (pmd_trans_huge(*pmd)) {
-		if (flags & FOLL_SPLIT) {
-			split_huge_page_pmd(vma, address, pmd);
-			return follow_page_pte(vma, address, pmd, flags);
-		}
-		ptl = pmd_lock(mm, pmd);
-		if (likely(pmd_trans_huge(*pmd))) {
-			if (unlikely(pmd_trans_splitting(*pmd))) {
-				spin_unlock(ptl);
-				wait_split_huge_page(vma->anon_vma, pmd);
-			} else {
-				page = follow_trans_huge_pmd(vma, address,
-							     pmd, flags);
-				spin_unlock(ptl);
-				*page_mask = HPAGE_PMD_NR - 1;
-				return page;
-			}
-		} else
-			spin_unlock(ptl);
+	if (likely(!pmd_trans_huge(*pmd)))
+		return follow_page_pte(vma, address, pmd, flags);
+
+	ptl = pmd_lock(mm, pmd);
+	if (unlikely(!pmd_trans_huge(*pmd))) {
+		spin_unlock(ptl);
+		return follow_page_pte(vma, address, pmd, flags);
 	}
-	return follow_page_pte(vma, address, pmd, flags);
+
+	if (unlikely(pmd_trans_splitting(*pmd))) {
+		spin_unlock(ptl);
+		wait_split_huge_page(vma->anon_vma, pmd);
+		return follow_page_pte(vma, address, pmd, flags);
+	}
+
+	if (flags & FOLL_SPLIT) {
+		int ret;
+		page = pmd_page(*pmd);
+		if (is_huge_zero_page(page)) {
+			spin_unlock(ptl);
+			ret = 0;
+			split_huge_page_pmd(vma, address, pmd);
+		} else {
+			get_page(page);
+			spin_unlock(ptl);
+			lock_page(page);
+			ret = split_huge_page(page);
+			unlock_page(page);
+			put_page(page);
+		}
+
+		return ret ? ERR_PTR(ret) :
+			follow_page_pte(vma, address, pmd, flags);
+	}
+
+	page = follow_trans_huge_pmd(vma, address, pmd, flags);
+	spin_unlock(ptl);
+	*page_mask = HPAGE_PMD_NR - 1;
+	return page;
 }
 
 static int get_gate_page(struct mm_struct *mm, unsigned long address,