Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)

commit ef26b76d1a
Commit cf11e85fc0 ("mm: hugetlb: optionally allocate gigantic hugepages using cma") added support for allocating gigantic hugepages using CMA. This patch enables the same for powerpc.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200713150749.25245-1-aneesh.kumar@linux.ibm.com
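The header below only adds the declaration of gigantic_hugetlb_cma_reserve(); the reservation logic itself lives on the C side of the patch. As a minimal sketch of what that definition might look like, assuming it sits with the rest of the powerpc hugetlb code and relies only on the generic hugetlb_cma_reserve() helper introduced by cf11e85fc0 (the order-selection branches shown here are an illustration, not the verbatim patch):

#include <linux/hugetlb.h>	/* hugetlb_cma_reserve(), added by cf11e85fc0 */
#include <linux/init.h>
#include <asm/firmware.h>
#include <asm/mmu.h>
#include <asm/page.h>

void __init gigantic_hugetlb_cma_reserve(void)
{
	unsigned long order = 0;

	if (radix_enabled()) {
		/* Radix MMU: gigantic pages are PUD sized. */
		order = PUD_SHIFT - PAGE_SHIFT;
	} else if (!firmware_has_feature(FW_FEATURE_LPAR) &&
		   mmu_psize_defs[MMU_PAGE_16G].shift) {
		/* Assumption: hash MMU on bare metal uses 16G pages if the MMU reports them. */
		order = mmu_psize_defs[MMU_PAGE_16G].shift - PAGE_SHIFT;
	}

	if (order)
		/* Carves out per-node CMA areas sized by the hugetlb_cma= parameter. */
		hugetlb_cma_reserve(order);
}

The hash/16G branch in particular is an assumption about how the non-radix case might be handled; the only firm interface here is hugetlb_cma_reserve(order), which does nothing unless hugetlb_cma=<size> is passed on the kernel command line.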
84 lines · 2.2 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#include <asm/nohash/hugetlb-book3e.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */

extern bool hugetlb_disabled;

void hugetlbpage_init_default(void);

void flush_dcache_icache_hugepage(struct page *page);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range

#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}

static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */
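Providing the empty stub in the !CONFIG_HUGETLB_PAGE branch lets early boot code call gigantic_hugetlb_cma_reserve() unconditionally, with no #ifdef at the call site. A hedged illustration of that calling convention (the caller's name here is hypothetical; only the unconditional call is the point):

/* Hypothetical early-boot caller: no #ifdef CONFIG_HUGETLB_PAGE needed. */
void __init example_powerpc_reserve_cma(void)
{
	/* Resolves to the empty stub above when hugetlb support is compiled out. */
	gigantic_hugetlb_cma_reserve();
}

Once the CMA area is reserved, gigantic hugetlb pages can be allocated after boot (for example by raising nr_hugepages for the gigantic page size via sysfs) instead of having to be set aside entirely at boot time, which is the behaviour cf11e85fc0 introduced in the generic code and this patch wires up for powerpc.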