mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-12 23:16:39 +07:00
f4f97b3ea9
Xen requires all active pagetables to be marked read-only. When the base of the pagetable is loaded into %cr3, the hypervisor validates the entire pagetable and only allows the load to proceed if it all checks out.

This is pretty slow, so to mitigate this cost Xen has a notion of pinned pagetables. A pinned pagetable is one the hypervisor considers active even if no processor's cr3 is pointing to it; it must remain read-only, and all updates to it are validated by the hypervisor. This makes context switches much cheaper, because the hypervisor doesn't need to revalidate the pagetable each time.

This also adds a new paravirt hook which is called during setup once the zones and memory allocator have been initialized. When the init_mm pagetable is first built, the struct page array does not yet exist, so there's nowhere to put the init_mm pagetable's PG_pinned flags. Once the zones are initialized and the struct page array exists, we can set the PG_pinned flags for those pages.

This patch also adds Xen support for pte pages allocated out of highmem (highpte) by implementing xen_kmap_atomic_pte.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Cc: Zach Amsden <zach@vmware.com>
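Conceptually, pinning boils down to two steps: remap every page of the pagetable read-only in the guest's own mappings, then ask the hypervisor to validate and pin the top level so that later %cr3 loads skip revalidation. The fragment below is only an illustrative sketch of those two steps, not the patch's actual implementation (which batches the work through multicalls and a generic pgd walker). The helper names pin_pagetable_page() and pin_toplevel() are made up for this sketch; the hypercalls, the MMUEXT_PIN_* commands, UVMF_INVLPG, and the PG_pinned page flag are the real interfaces this work builds on.

#include <linux/mm.h>           /* struct page, page_address(), page_to_pfn() */
#include <asm/pgtable.h>        /* pfn_pte(), PAGE_KERNEL_RO                  */
#include <xen/interface/xen.h>  /* struct mmuext_op, MMUEXT_PIN_*, DOMID_SELF */
#include <asm/xen/hypercall.h>  /* HYPERVISOR_update_va_mapping/_mmuext_op    */
#include <asm/xen/page.h>       /* virt_to_mfn()                              */

/* Hypothetical helper: make one pagetable page read-only and mark it. */
static void pin_pagetable_page(struct page *pg)
{
	unsigned long va = (unsigned long)page_address(pg);

	/*
	 * Step 1: the guest's own mapping of the page must become RO,
	 * or Xen will refuse to treat it as a pagetable.  (This glosses
	 * over the pfn->mfn translation and batching the real code does.)
	 */
	HYPERVISOR_update_va_mapping(va,
				     pfn_pte(page_to_pfn(pg), PAGE_KERNEL_RO),
				     UVMF_INVLPG);

	SetPagePinned(pg);	/* PG_pinned: "kept validated by Xen" */
}

/* Hypothetical helper: ask Xen to validate and pin the whole tree. */
static void pin_toplevel(pgd_t *pgd)
{
	struct mmuext_op op = {
#ifdef CONFIG_X86_PAE
		.cmd = MMUEXT_PIN_L3_TABLE,
#else
		.cmd = MMUEXT_PIN_L2_TABLE,
#endif
		.arg1.mfn = virt_to_mfn(pgd),
	};

	/* Step 2: once pinned, later cr3 loads of this pgd are cheap. */
	HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
}

The header below declares the entry points that back this machinery.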
#ifndef _XEN_MMU_H
#define _XEN_MMU_H

#include <linux/linkage.h>
#include <asm/page.h>

void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

/* Pagetable-entry setters that go through the hypervisor. */
void xen_set_pte(pte_t *ptep, pte_t pteval);
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval);
void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);

/* mm lifetime hooks: pin on activate/dup, unpin on exit. */
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
void xen_exit_mmap(struct mm_struct *mm);

void xen_pgd_pin(pgd_t *pgd);
//void xen_pgd_unpin(pgd_t *pgd);

#ifdef CONFIG_X86_PAE
/* PAE: pagetable entries are 64 bits wide. */
unsigned long long xen_pte_val(pte_t);
unsigned long long xen_pmd_val(pmd_t);
unsigned long long xen_pgd_val(pgd_t);

pte_t xen_make_pte(unsigned long long);
pmd_t xen_make_pmd(unsigned long long);
pgd_t xen_make_pgd(unsigned long long);

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval);
void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
void xen_set_pud(pud_t *ptr, pud_t val);
void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void xen_pmd_clear(pmd_t *pmdp);

#else
/* Non-PAE: entries fit in an unsigned long. */
unsigned long xen_pte_val(pte_t);
unsigned long xen_pmd_val(pmd_t);
unsigned long xen_pgd_val(pgd_t);

pte_t xen_make_pte(unsigned long);
pmd_t xen_make_pmd(unsigned long);
pgd_t xen_make_pgd(unsigned long);
#endif

#endif /* _XEN_MMU_H */
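For orientation, these entry points only take effect once the Xen setup code installs them in the kernel's paravirt dispatch table. The snippet below is a minimal, non-authoritative sketch of that wiring; the field names follow the mm hooks of that kernel generation's struct paravirt_ops and are an assumption here, not quoted from the patch, and unrelated fields are omitted.

/* Sketch only: how the Xen setup code might plug the mmu.h entry points
 * into the paravirt dispatch table (field names assumed). */
static struct paravirt_ops xen_paravirt_ops = {
	.set_pte     = xen_set_pte,
	.set_pte_at  = xen_set_pte_at,
	.set_pmd     = xen_set_pmd,

	.activate_mm = xen_activate_mm,	/* pin the incoming mm's pagetable */
	.dup_mmap    = xen_dup_mmap,	/* pin the copy made at fork()     */
	.exit_mmap   = xen_exit_mmap,	/* unpin when the mm is torn down  */
};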