x86/mm: Add basic defines/helpers for CONFIG_X86_5LEVEL=y

Extends pagetable headers to support the new paging mode.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170330080731.65421-6-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

@@ -35,6 +35,13 @@ extern void paging_init(void);
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))
@@ -128,7 +135,11 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
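
For context (editorial note, not part of the patch): the #else branch above clears the entry through an embedded .pgd field because, when the p4d level is folded away, <asm-generic/pgtable-nop4d.h> defines p4d_t as a thin wrapper around pgd_t, roughly:

/* Paraphrased from <asm-generic/pgtable-nop4d.h>; not part of this patch.
 * With the p4d level folded, a p4d entry simply is the pgd entry, which
 * is why the !CONFIG_X86_5LEVEL branch initialises .pgd above. */
typedef struct { pgd_t pgd; } p4d_t;
#define p4d_val(x)	(pgd_val((x).pgd))

With CONFIG_X86_5LEVEL, p4d_t becomes a real, separate type (see the hunk below that replaces the old "#error FIXME" placeholder), so clearing goes through native_make_p4d(0) instead.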

@@ -23,12 +23,32 @@ typedef struct { pteval_t pte; } pte_t;
#define SHARED_KERNEL_PMD 0

#ifdef CONFIG_X86_5LEVEL

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT 48
#define PTRS_PER_PGD 512

/*
 * 4th level page in 5-level paging case
 */
#define P4D_SHIFT 39
#define PTRS_PER_P4D 512
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))

#else /* CONFIG_X86_5LEVEL */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512

#endif /* CONFIG_X86_5LEVEL */

/*
 * 3rd level page
 */
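
For orientation (editorial note, not part of the patch): with CONFIG_X86_5LEVEL these constants split a 57-bit virtual address into 9-bit table indexes per level: the PGD indexes bits 56:48 (PGDIR_SHIFT = 48), the new P4D level bits 47:39 (P4D_SHIFT = 39), followed by the existing PUD/PMD/PTE levels at bits 38:30, 29:21 and 20:12. A minimal sketch of what the two new shifts mean (the helper names below are made up; the kernel's real pgd_index()-style helpers live elsewhere in the headers and are untouched here):

/* Sketch only, under the assumptions stated above -- not part of the patch. */
static inline unsigned long sketch_pgd_index(unsigned long address)
{
	return (address >> 48) & (512 - 1);	/* PGDIR_SHIFT, PTRS_PER_PGD */
}

static inline unsigned long sketch_p4d_index(unsigned long address)
{
	return (address >> 39) & (512 - 1);	/* P4D_SHIFT, PTRS_PER_P4D */
}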

@@ -273,9 +273,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>
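
This hunk replaces the previous "#error FIXME" placeholder in the CONFIG_PGTABLE_LEVELS > 4 branch with a real p4d_t definition and its raw-value conversion helpers. A hypothetical illustration of how the new pair fits together (the sketch_* helpers are made up for this note and are not part of the patch; _KERNPG_TABLE and PTE_PFN_MASK are pre-existing x86 page-table constants):

/* Hypothetical usage sketch -- not part of the patch.  Build a p4d entry
 * that points at a PUD page, then read the raw value back. */
static inline p4d_t sketch_mk_p4d(unsigned long pud_phys)
{
	return native_make_p4d(pud_phys | _KERNPG_TABLE);
}

static inline unsigned long sketch_p4d_to_phys(p4d_t p4d)
{
	return native_p4d_val(p4d) & PTE_PFN_MASK;
}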

@@ -81,6 +81,14 @@ void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
@@ -120,7 +128,7 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
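
The only change in this hunk is the last arm of the condition: it was "CONFIG_PGTABLE_LEVELS == 4" before this patch and becomes ">= 4". An editorial note on why, in comment form (not part of the patch):

/* Editorial note, not part of the patch: with CONFIG_X86_5LEVEL the kernel
 * builds with CONFIG_PGTABLE_LEVELS == 5, and the kernel half of each new
 * PGD must still be cloned from swapper_pg_dir:
 *
 *   4-level build: 4 >= 4 -> true (behaviour unchanged)
 *   5-level build: 5 >= 4 -> true (the old "5 == 4" test would have been
 *                                  false and skipped the clone)
 */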
@@ -582,6 +590,28 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
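
p4d_set_huge() follows the same convention as the existing pud_set_huge()/pmd_set_huge() helpers: a non-zero return means the range was mapped with a single huge entry, 0 tells the caller to fall back to the next smaller page-table level. A hypothetical caller pattern (sketch only, not part of the patch; the function name is made up):

/* Sketch of the fallback pattern a caller would use -- not part of the
 * patch.  Because p4d_set_huge() always returns 0 for now, the walk
 * always descends to the PUD level. */
static bool sketch_try_huge_p4d(p4d_t *p4d, phys_addr_t phys, pgprot_t prot)
{
	if (p4d_set_huge(p4d, phys, prot))
		return true;	/* whole P4D_SIZE chunk mapped with one entry */

	return false;		/* caller must map this range at the PUD level */
}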