Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Commit ce6234b529
Xen and VMI both have special requirements when mapping a highmem pte
page into the kernel address space. These can be dealt with by adding a
new kmap_atomic_pte() function for mapping highptes, and hooking it
into the paravirt_ops infrastructure.

Xen specifically wants to map the pte page RO, so this patch exposes a
helper function, kmap_atomic_prot, which maps the page with the
specified page protections.

This also adds a kmap_flush_unused() function to clear out the cached
kmap mappings. Xen needs this to clear out any potential stray RW
mappings of pages which will become part of a pagetable.

[ Zach - vmi.c will need some attention after this patch. It wasn't
  immediately obvious to me what needs to be done. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
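
Below is a minimal caller-side sketch of the new helper declared in the
header that follows. The function peek_at_page() and the choice of the
KM_USER0 slot are illustrative, not part of this patch; the point is
that kmap_atomic_prot() behaves like kmap_atomic() but takes the page
protections explicitly:

	#include <linux/highmem.h>

	/* Hypothetical caller: briefly map a page read-only. */
	static void peek_at_page(struct page *page)
	{
		void *vaddr = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL_RO);

		/* ... inspect the page contents through vaddr ... */

		kunmap_atomic(vaddr, KM_USER0);
	}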
86 lines · 2.2 KiB · C
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *                    Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/interrupt.h>
#include <linux/threads.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
/*
 * Ordering is:
 *
 * FIXADDR_TOP
 *	fixed_addresses
 * FIXADDR_START
 *	temp fixed addresses
 * FIXADDR_BOOT_START
 *	Persistent kmap area
 * PKMAP_BASE
 * VMALLOC_END
 *	Vmalloc area
 * VMALLOC_START
 * high_memory
 */
#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
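
/*
 * Illustrative sketch, not part of the original header: the two pkmap
 * macros above are inverses for any in-range slot. The helper name
 * pkmap_slot_roundtrips() is hypothetical.
 */
static inline int pkmap_slot_roundtrips(unsigned long nr)
{
	/* PKMAP_ADDR() places slot nr at PKMAP_BASE + nr * PAGE_SIZE... */
	unsigned long vaddr = PKMAP_ADDR(nr);

	/* ...and PKMAP_NR() recovers the slot number from that address. */
	return PKMAP_NR(vaddr) == nr;	/* true for any nr < LAST_PKMAP */
}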

extern void * FASTCALL(kmap_high(struct page *page));
extern void FASTCALL(kunmap_high(struct page *page));

void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
struct page *kmap_atomic_to_page(void *ptr);

#ifndef CONFIG_PARAVIRT
#define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
#endif

#define flush_cache_kmaps()	do { } while (0)

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */
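
To illustrate why kmap_atomic_pte() is worth hooking: with
CONFIG_PARAVIRT enabled, asm/paravirt.h can route it to a
hypervisor-specific implementation built on kmap_atomic_prot(). The
backend below is a hedged sketch based only on the commit message
above ("Xen specifically wants to map the pte page RO"); the function
name is made up and this is not the actual Xen code:

	/* Hypothetical paravirt backend for mapping highmem pte pages. */
	static void *xen_style_kmap_atomic_pte(struct page *page,
					       enum km_type type)
	{
		/* Pagetable pages must not gain stray RW mappings. */
		return kmap_atomic_prot(page, type, PAGE_KERNEL_RO);
	}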