commit aaa50048f6
In commit e616c59140, highmem support was deactivated for SMP
platforms without hardware TLB ops broadcast, because usage of
kmap_high_get() requires that IRQs be disabled while kmap_lock is
held, which is incompatible with the IPI mechanism used by the
software TLB ops broadcast invoked through flush_all_zero_pkmaps().
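
To make the conflict concrete: kmap_high_get() has to pin the page's
existing pkmap entry under kmap_lock with IRQs disabled, while
flush_all_zero_pkmaps() runs under that same lock and must flush the
TLB on all CPUs. With software broadcast that flush is an IPI, and a
CPU spinning on kmap_lock with IRQs off can never service it. A
minimal sketch of the lock-side pattern, abridged from what
mm/highmem.c does at this point in time (pkmap_count bookkeeping and
error checks are trimmed, so treat this as illustrative, not the
exact code):

/*
 * Abridged sketch: pin the page's existing kernel mapping, if any.
 * Returns NULL when the page currently has no kernel mapping.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	/* IRQs must stay disabled for as long as kmap_lock is held */
	spin_lock_irqsave(&kmap_lock, flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr)
		pkmap_count[PKMAP_NR(vaddr)]++;	/* pin the mapping */
	spin_unlock_irqrestore(&kmap_lock, flags);
	return (void *)vaddr;
}

flush_all_zero_pkmaps(), by contrast, calls flush_tlb_kernel_range()
with kmap_lock held; on SMP without hardware broadcast that flush is
carried out via IPIs, which must not be issued with IRQs disabled.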
The reason for kmap_high_get() is to ensure that the currently kmap'd
page usage count does not decrease to zero while we're using its
existing virtual mapping in an atomic context. With a VIVT cache this
is essential due to cache coherency issues, but with a VIPT cache it
is only an optimization, so as not to pay the price of establishing a
second mapping if an existing one can be used. However, on VIPT
platforms without hardware TLB maintenance broadcast we can give up
on that optimization in order to be able to use highmem.
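
On the caller side, giving up the optimization just means tolerating
a NULL return from kmap_high_get() and building a fresh mapping
instead. Roughly like this (a minimal sketch, not the exact kernel
code; do_cache_op_on() is a hypothetical caller):

/* Hypothetical caller: reuse a pinned mapping if possible. */
static void do_cache_op_on(struct page *page)
{
	bool atomic_mapped = false;
	void *vaddr;

	/* NULL when no mapping exists or the #define is disabled */
	vaddr = kmap_high_get(page);
	if (!vaddr) {
		/* VIPT: a second mapping is merely a cost, not a bug */
		vaddr = kmap_atomic(page);
		atomic_mapped = true;
	}

	/* ... operate on the kernel mapping of the highmem page ... */

	if (atomic_mapped)
		kunmap_atomic(vaddr);
	else
		kunmap_high(page);
}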
From ARMv7 onwards the TLB ops are broadcast in hardware, so let's
disable ARCH_NEEDS_KMAP_HIGH_GET only when CONFIG_SMP and
CONFIG_CPU_TLB_V6 are defined.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Saeed Bishara <saeed.bishara@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
67 lines
2.0 KiB
C
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/kmap_types.h>

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define kmap_prot		PAGE_KERNEL

#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)

extern pte_t *pkmap_page_table;

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context.  With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so not to pay the price of establishing a second mapping if an existing
 * one can be used.  However, on platforms without hardware TLB maintenance
 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
 * the locking involved must also disable IRQs which is incompatible with
 * the IPI mechanism used by global TLB operations.
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
static inline void *kmap_high_get(struct page *page)
{
	return NULL;
}
#endif

/*
 * The following functions are already defined by <linux/highmem.h>
 * when CONFIG_HIGHMEM is not set.
 */
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *__kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif

#endif