thp: rename split_huge_page_pmd() to split_huge_pmd()

We are going to decouple splitting the THP PMD from splitting the
underlying compound page.

This patch renames the split_huge_page_pmd*() functions to split_huge_pmd*()
to reflect the fact that they split only the PMD, not the underlying page.
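
For call sites, the rename also reorders the last two arguments: the pmd
pointer now comes before the address. A minimal before/after sketch of a
typical caller (vma, pmd and addr stand for whatever the call site already
has in scope):

	/* before this patch */
	split_huge_page_pmd(vma, addr, pmd);

	/* after this patch */
	split_huge_pmd(vma, pmd, addr);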

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Kirill A. Shutemov, 2016-01-15 16:52:42 -08:00 (committed by Linus Torvalds)
commit 78ddc53473, parent b1caa957ae
10 changed files with 25 additions and 35 deletions

arch/powerpc/mm/subpage-prot.c

@@ -135,7 +135,7 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->vma;
-	split_huge_page_pmd(vma, addr, pmd);
+	split_huge_pmd(vma, pmd, addr);
 	return 0;
 }

arch/x86/kernel/vm86_32.c

@@ -175,7 +175,11 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	if (pud_none_or_clear_bad(pud))
 		goto out;
 	pmd = pmd_offset(pud, 0xA0000);
-	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
+
+	if (pmd_trans_huge(*pmd)) {
+		struct vm_area_struct *vma = find_vma(mm, 0xA0000);
+		split_huge_pmd(vma, pmd, 0xA0000);
+	}
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
 	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);

include/linux/huge_mm.h

@@ -102,7 +102,7 @@ static inline int split_huge_page(struct page *page)
 }
 extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd);
-#define split_huge_page_pmd(__vma, __address, __pmd)			\
+#define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
 		if (unlikely(pmd_trans_huge(*____pmd)))			\
@@ -117,8 +117,6 @@ extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
 		       pmd_trans_huge(*____pmd));			\
 	} while (0)
-extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-				   pmd_t *pmd);
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -183,11 +181,9 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
-#define split_huge_page_pmd(__vma, __address, __pmd)	\
-	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
-#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
+#define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 				   unsigned long *vm_flags, int advice)

mm/gup.c

@@ -254,7 +254,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 	if (is_huge_zero_page(page)) {
 		spin_unlock(ptl);
 		ret = 0;
-		split_huge_page_pmd(vma, address, pmd);
+		split_huge_pmd(vma, pmd, address);
 	} else {
 		get_page(page);
 		spin_unlock(ptl);

mm/huge_memory.c

@@ -1233,13 +1233,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (unlikely(!new_page)) {
 		if (!page) {
-			split_huge_page_pmd(vma, address, pmd);
+			split_huge_pmd(vma, pmd, address);
 			ret |= VM_FAULT_FALLBACK;
 		} else {
 			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 					pmd, orig_pmd, page, haddr);
 			if (ret & VM_FAULT_OOM) {
-				split_huge_page(page);
+				split_huge_pmd(vma, pmd, address);
 				ret |= VM_FAULT_FALLBACK;
 			}
 			put_user_huge_page(page);
@@ -1252,10 +1252,10 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 					   true))) {
 		put_page(new_page);
 		if (page) {
-			split_huge_page(page);
+			split_huge_pmd(vma, pmd, address);
 			put_user_huge_page(page);
 		} else
-			split_huge_page_pmd(vma, address, pmd);
+			split_huge_pmd(vma, pmd, address);
 		ret |= VM_FAULT_FALLBACK;
 		count_vm_event(THP_FAULT_FALLBACK);
 		goto out;
@@ -3131,17 +3131,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 		goto again;
 }
 
-void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd)
-{
-	struct vm_area_struct *vma;
-
-	vma = find_vma(mm, address);
-	BUG_ON(vma == NULL);
-	split_huge_page_pmd(vma, address, pmd);
-}
-
-static void split_huge_page_address(struct mm_struct *mm,
+static void split_huge_pmd_address(struct vm_area_struct *vma,
 				    unsigned long address)
 {
 	pgd_t *pgd;
@@ -3150,7 +3140,7 @@ static void split_huge_page_address(struct mm_struct *mm,
 
 	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-	pgd = pgd_offset(mm, address);
+	pgd = pgd_offset(vma->vm_mm, address);
 	if (!pgd_present(*pgd))
 		return;
@@ -3159,13 +3149,13 @@ static void split_huge_page_address(struct mm_struct *mm,
 		return;
 
 	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd))
+	if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
 		return;
 	/*
 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
 	 * materialize from under us.
 	 */
-	split_huge_page_pmd_mm(mm, address, pmd);
+	__split_huge_page_pmd(vma, address, pmd);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -3181,7 +3171,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	if (start & ~HPAGE_PMD_MASK &&
 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
-		split_huge_page_address(vma->vm_mm, start);
+		split_huge_pmd_address(vma, start);
 
 	/*
 	 * If the new end address isn't hpage aligned and it could
@@ -3191,7 +3181,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	if (end & ~HPAGE_PMD_MASK &&
 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
-		split_huge_page_address(vma->vm_mm, end);
+		split_huge_pmd_address(vma, end);
 
 	/*
 	 * If we're also updating the vma->vm_next->vm_start, if the new
@@ -3205,6 +3195,6 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 		if (nstart & ~HPAGE_PMD_MASK &&
 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
-			split_huge_page_address(next->vm_mm, nstart);
+			split_huge_pmd_address(next, nstart);
 	}
 }

mm/memory.c

@@ -1193,7 +1193,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 					BUG();
 				}
 #endif
-				split_huge_page_pmd(vma, addr, pmd);
+				split_huge_pmd(vma, pmd, addr);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
 			/* fall through */

mm/mempolicy.c

@@ -493,7 +493,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_page_pmd(vma, addr, pmd);
+	split_huge_pmd(vma, pmd, addr);
 	if (pmd_trans_unstable(pmd))
 		return 0;

mm/mprotect.c

@@ -160,7 +160,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
-				split_huge_page_pmd(vma, addr, pmd);
+				split_huge_pmd(vma, pmd, addr);
 			else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
 						newprot, prot_numa);

mm/mremap.c

@@ -209,7 +209,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				need_flush = true;
 				continue;
 			} else if (!err) {
-				split_huge_page_pmd(vma, old_addr, old_pmd);
+				split_huge_pmd(vma, old_pmd, old_addr);
 			}
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
} }

mm/pagewalk.c

@@ -58,7 +58,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 		if (!walk->pte_entry)
 			continue;
 
-		split_huge_page_pmd_mm(walk->mm, addr, pmd);
+		split_huge_pmd(walk->vma, pmd, addr);
 		if (pmd_trans_unstable(pmd))
 			goto again;
 		err = walk_pte_range(pmd, addr, next, walk);