Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-28 11:18:45 +07:00
2ef14f465b
Pull x86 mm changes from Peter Anvin:
 "This is a huge set of several partly interrelated (and concurrently
  developed) changes, which is why the branch history is messier than one
  would like.

  The *really* big items are two humongous patchsets mostly developed by
  Yinghai Lu at my request, which completely revamp the way we create
  initial page tables.  In particular, rather than estimating how much
  memory we will need for page tables and then building them into that
  memory -- a calculation that has proven to be incredibly fragile -- we
  now build them (on 64 bits) with the aid of a "pseudo-linear mode" -- a
  #PF handler which creates temporary page tables on demand.

  This has several advantages:

  1. It makes it much easier to support things that need access to data
     very early (a follow-on patchset uses this to load microcode way
     early in the kernel startup).

  2. It allows the kernel and all the kernel data objects to be invoked
     from above the 4 GB limit.  This allows kdump to work on very large
     systems.

  3. It greatly reduces the difference between Xen and native (Xen's
     equivalent of the #PF handler are the temporary page tables created
     by the domain builder), eliminating a bunch of fragile hooks.

  The patch series also gets us a bit closer to W^X.

  Additional work in this pull is the 64-bit get_user() work which you
  were also involved with, and a bunch of cleanups/speedups to
  __phys_addr()/__pa()."

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (105 commits)
  x86, mm: Move reserving low memory later in initialization
  x86, doc: Clarify the use of asm("%edx") in uaccess.h
  x86, mm: Redesign get_user with a __builtin_choose_expr hack
  x86: Be consistent with data size in getuser.S
  x86, mm: Use a bitfield to mask nuisance get_user() warnings
  x86/kvm: Fix compile warning in kvm_register_steal_time()
  x86-32: Add support for 64bit get_user()
  x86-32, mm: Remove reference to alloc_remap()
  x86-32, mm: Remove reference to resume_map_numa_kva()
  x86-32, mm: Rip out x86_32 NUMA remapping code
  x86/numa: Use __pa_nodebug() instead
  x86: Don't panic if can not alloc buffer for swiotlb
  mm: Add alloc_bootmem_low_pages_nopanic()
  x86, 64bit, mm: hibernate use generic mapping_init
  x86, 64bit, mm: Mark data/bss/brk to nx
  x86: Merge early kernel reserve for 32bit and 64bit
  x86: Add Crash kernel low reservation
  x86, kdump: Remove crashkernel range find limit for 64bit
  memblock: Add memblock_mem_size()
  x86, boot: Not need to check setup_header version for setup_data
  ...
191 lines
4.9 KiB
C
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

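/*
 * Boot-time page tables.  These are statically allocated and populated by
 * the early 64-bit startup code (head_64.S) before the normal allocators
 * are available.
 */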
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

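/*
 * The native_* helpers below are the raw hardware page-table operations.
 * Under CONFIG_PARAVIRT they sit behind the pv_mmu_ops indirection, so a
 * hypervisor such as Xen can supply its own implementations.
 */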
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

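/*
 * The get-and-clear helpers read the old entry and zero it.  On SMP this
 * is done with xchg so that accessed/dirty bits set concurrently by
 * another CPU's hardware walker are not lost between the read and the
 * store of zero.
 */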
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

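/*
 * sync_global_pgds() copies kernel-range PGD entries newly populated in
 * init_mm into every other pgd in the system; unlike the lower levels,
 * PGD-level entries are not shared between page tables, so new ones have
 * to be propagated explicitly.
 */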
extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
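/*
 * There are no huge pages at the PGD level, so pgd_large() is always 0;
 * mk_kernel_pgd() builds a kernel PGD entry (with _KERNPG_TABLE
 * permissions) from a physical address.
 */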
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */

/* PMD - Level 2 access */
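/*
 * For non-linear file mappings (remap_file_pages()), the file page offset
 * is stashed in a not-present PTE tagged with _PAGE_FILE;
 * PTE_FILE_MAX_BITS bounds the offset that can be encoded this way.
 */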
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
					     _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/* Encode and de-code a swap entry */
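/*
 * A swap entry lives in a not-present PTE: bit 0 (present) stays clear,
 * the swap type sits in the bits just above it, and the swap offset
 * starts above whichever of the _PAGE_FILE/_PAGE_PROTNONE software bits
 * is higher, so neither of those bits is clobbered.  As an illustrative
 * round trip (a sketch, not code from this file):
 *
 *	swp_entry_t e   = __swp_entry(type, offset);
 *	pte_t       pte = __swp_entry_to_pte(e);
 *
 *	__swp_type(__pte_to_swp_entry(pte))   == type;
 *	__swp_offset(__pte_to_swp_entry(pte)) == offset;
 */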
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)
#define check_pgt_cache() do { } while (0)

#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

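/*
 * vmemmap is the virtually contiguous array of struct page used by the
 * sparse-vmemmap memory model; pfn_to_page()/page_to_pfn() reduce to
 * simple array indexing from VMEMMAP_START.
 */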
#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */