mm: hugetlb: Copy general hugetlb code from x86 to mm.
The huge_pte_alloc, huge_pte_offset and follow_huge_p[mu]d functions in
x86/mm/hugetlbpage.c do not rely on any architecture-specific knowledge
other than the fact that pmds and puds can be treated as huge ptes.

To allow other architectures to use this code (and reduce the need for
code duplication), this patch copies these functions into mm, replaces
the use of pud_large with pud_huge and provides a config flag to
activate them: CONFIG_ARCH_WANT_GENERAL_HUGETLB.

If CONFIG_ARCH_WANT_HUGE_PMD_SHARE is also active then the
huge_pmd_share code will be called by huge_pte_alloc (otherwise we call
pmd_alloc and skip the sharing code).

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
parent cfe28c5d63
commit 9e5fc74c30
mm/hugetlb.c | 97 lines changed (88 additions, 9 deletions)
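The whole change hinges on the invariant stated in the commit message: on a
participating architecture, a pmd or pud entry that maps a huge page can be
treated as a huge pte, so the generic code hands back pointers into higher
levels of the page table as if they were pte pointers. Both casts appear in
the huge_pte_alloc() added below:

    pte = (pte_t *)pud;                        /* PUD-sized huge page */
    pte = (pte_t *)pmd_alloc(mm, pud, addr);   /* PMD-sized huge page */

The first hunk removes the weak follow_huge_pud() fallback from its old
location: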
@@ -2931,15 +2931,6 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return ret;
 }
 
-/* Can be overriden by architectures */
-__attribute__((weak)) struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
-	       pud_t *pud, int write)
-{
-	BUG();
-	return NULL;
-}
-
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 struct page **pages, struct vm_area_struct **vmas,
 			 unsigned long *position, unsigned long *nr_pages,
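This block is not deleted outright: the same weak fallback reappears under
the !CONFIG_ARCH_WANT_GENERAL_HUGETLB branch at the end of the second hunk,
so architectures that do not opt in keep the old behaviour.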
@@ -3289,8 +3280,96 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
 	return 1;
 }
+#define want_pmd_share()	(1)
+#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+	return NULL;
+}
+#define want_pmd_share()	(0)
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
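want_pmd_share() is a compile-time constant, not a runtime flag. When
CONFIG_ARCH_WANT_HUGE_PMD_SHARE is unset, the condition in huge_pte_alloc()
below reduces to if (0 && ...), so the sharing path is dead code; the stub
definition of huge_pmd_share() merely keeps the call compiling and linking.
Roughly, the PMD branch reduces to one of these two forms (my paraphrase,
not code from the patch):

    /* sharing enabled */
    if (pud_none(*pud))
            pte = huge_pmd_share(mm, addr, pud);
    else
            pte = (pte_t *)pmd_alloc(mm, pud, addr);

    /* sharing disabled: always a private pmd */
    pte = (pte_t *)pmd_alloc(mm, pud, addr);

The hunk continues with the generic allocator: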
+
+#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+			unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pte_t *pte = NULL;
+
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		if (sz == PUD_SIZE) {
+			pte = (pte_t *)pud;
+		} else {
+			BUG_ON(sz != PMD_SIZE);
+			if (want_pmd_share() && pud_none(*pud))
+				pte = huge_pmd_share(mm, addr, pud);
+			else
+				pte = (pte_t *)pmd_alloc(mm, pud, addr);
+		}
+	}
+	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+
+	return pte;
+}
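For context, the main caller of huge_pte_alloc() is the hugetlb fault path;
abridged from hugetlb_fault() in the mm/hugetlb.c of this era (not part of
this patch):

    struct hstate *h = hstate_vma(vma);
    pte_t *ptep;

    ptep = huge_pte_alloc(mm, address, huge_page_size(h));
    if (!ptep)
            return VM_FAULT_OOM;

Next, the lookup-only counterpart: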
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd = NULL;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_present(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (pud_present(*pud)) {
+			if (pud_huge(*pud))
+				return (pte_t *)pud;
+			pmd = pmd_offset(pud, addr);
+		}
+	}
+	return (pte_t *) pmd;
+}
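huge_pte_offset() never allocates: it returns NULL when nothing is mapped.
Note the early return for the PUD case, so the final return (pte_t *) pmd
covers both a PMD-mapped huge page and the not-present case (pmd stays
NULL). A hedged usage sketch (huge_ptep_get() and huge_pte_none() are the
standard hugetlb accessors, not introduced by this patch):

    pte_t *ptep = huge_pte_offset(mm, address);

    if (ptep && !huge_pte_none(huge_ptep_get(ptep))) {
            /* a huge page is mapped at 'address' */
    }

Then the follow_huge_p[mu]d helpers: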
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd, int write)
+{
+	struct page *page;
+
+	page = pte_page(*(pte_t *)pmd);
+	if (page)
+		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+	return page;
+}
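The pointer arithmetic turns the byte offset inside the huge page into an
index into the compound page. A worked example, assuming 4 KB base pages
(PAGE_SHIFT = 12) and 2 MB PMDs:

    /* address              = 0x02345678
     * address & ~PMD_MASK  = 0x00145678  (byte offset inside the 2 MB page)
     * 0x145678 >> 12       = 325         (4 KB subpage index)
     * so the caller gets pte_page(*pmd) + 325
     */

follow_huge_pud() below is the same pattern with PUD_MASK, for PUD-sized
(e.g. 1 GB) pages: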
+
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+		pud_t *pud, int write)
+{
+	struct page *page;
+
+	page = pte_page(*(pte_t *)pud);
+	if (page)
+		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+	return page;
+}
+
+#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
+/* Can be overriden by architectures */
+__attribute__((weak)) struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+	       pud_t *pud, int write)
+{
+	BUG();
+	return NULL;
+}
+
+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
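The __attribute__((weak)) definition is a link-time default: any
architecture that compiles its own non-weak follow_huge_pud() replaces it
with no further glue. For illustration only, a hypothetical override in an
arch tree might look like this (the name and signature come from the patch;
the body is invented):

    /* arch/<arch>/mm/hugetlbpage.c, hypothetical */
    struct page *
    follow_huge_pud(struct mm_struct *mm, unsigned long address,
                    pud_t *pud, int write)
    {
            /* arch-specific translation instead of the BUG() stub */
            return pte_page(*(pte_t *)pud) +
                            ((address & ~PUD_MASK) >> PAGE_SHIFT);
    }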
 
 #ifdef CONFIG_MEMORY_FAILURE
 
 /* Should be called in hugetlb_lock */