mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-13 16:26:43 +07:00
aefa5688c0
updatepp can get called for a no-HPTE fault when we find from the Linux page table that the translation was hashed before. In that case we are sure that there is no existing translation, hence we could avoid doing tlbie.

We could possibly race with a parallel fault filling the TLB. But that should be ok because updatepp is only ever relaxing permissions. We also look at the Linux pte permission bits when filling the hash pte permission bits, and we hold the Linux pte busy bits while inserting/updating a hashpte entry, hence a parallel update of the Linux pte is not possible. On the other hand, mprotect involves ptep_modify_prot_start, which causes an hpte invalidate and not updatepp.

Performance numbers: we use random_access_bench written by Anton, on a kernel with THP disabled and a smaller hash page table size.

Without the fix:

86.60%  random_access_b  [kernel.kallsyms]    [k] .native_hpte_updatepp
 2.10%  random_access_b  random_access_bench  [.] doit
 1.99%  random_access_b  [kernel.kallsyms]    [k] .do_raw_spin_lock
 1.85%  random_access_b  [kernel.kallsyms]    [k] .native_hpte_insert
 1.26%  random_access_b  [kernel.kallsyms]    [k] .native_flush_hash_range
 1.18%  random_access_b  [kernel.kallsyms]    [k] .__delay
 0.69%  random_access_b  [kernel.kallsyms]    [k] .native_hpte_remove
 0.37%  random_access_b  [kernel.kallsyms]    [k] .clear_user_page
 0.34%  random_access_b  [kernel.kallsyms]    [k] .__hash_page_64K
 0.32%  random_access_b  [kernel.kallsyms]    [k] fast_exception_return
 0.30%  random_access_b  [kernel.kallsyms]    [k] .hash_page_mm

With the fix:

27.54%  random_access_b  random_access_bench  [.] doit
22.90%  random_access_b  [kernel.kallsyms]    [k] .native_hpte_insert
 5.76%  random_access_b  [kernel.kallsyms]    [k] .native_hpte_remove
 5.20%  random_access_b  [kernel.kallsyms]    [k] fast_exception_return
 5.12%  random_access_b  [kernel.kallsyms]    [k] .__hash_page_64K
 4.80%  random_access_b  [kernel.kallsyms]    [k] .hash_page_mm
 3.31%  random_access_b  [kernel.kallsyms]    [k] data_access_common
 1.84%  random_access_b  [kernel.kallsyms]    [k] .trace_hardirqs_on_caller

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
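In code terms, the change boils down to one extra piece of information flowing from the fault path into the back end's hpte_updatepp() hook: whether the fault found an existing HPTE at all. The fragment below is only an illustrative, stand-alone model of that decision, not the kernel source; the flag names (HPTE_LOCAL_UPDATE, HPTE_NOHPTE_UPDATE) and the helper are assumptions made for the sketch.

#include <stdbool.h>

#define HPTE_LOCAL_UPDATE   0x1UL  /* assumed name: flush only the local CPU's TLB */
#define HPTE_NOHPTE_UPDATE  0x2UL  /* assumed name: the fault found no existing HPTE */

/*
 * updatepp only ever relaxes permissions.  If the fault reported that no
 * HPTE existed, there is no stale translation to shoot down; a TLB entry
 * installed by a racing fault is still valid under the relaxed
 * permissions, so the global tlbie can be skipped.
 */
static bool updatepp_needs_tlbie(unsigned long flags)
{
        return !(flags & HPTE_NOHPTE_UPDATE);
}

With the tlbie gone from that path, the profile above shifts from .native_hpte_updatepp into the benchmark's own doit loop.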
130 lines
3.8 KiB
C
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                                  unsigned long pa, unsigned long rflags,
                                  unsigned long vflags, int psize, int ssize);

int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, unsigned long flags,
                     int ssize, unsigned int shift, unsigned int mmu_psize)
{
        unsigned long vpn;
        unsigned long old_pte, new_pte;
        unsigned long rflags, pa, sz;
        long slot;

        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

        /* Search the Linux page table for a match with va */
        vpn = hpt_vpn(ea, vsid, ssize);

        /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

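        /*
         * Atomically set _PAGE_BUSY (plus ACCESSED, and DIRTY for a write)
         * with a cmpxchg loop.  _PAGE_BUSY acts as a per-PTE lock: while it
         * is held, a parallel update of the Linux PTE cannot race with the
         * hash insert/update below (see the commit message above).
         */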
        do {
                old_pte = pte_val(*ptep);
                /* If PTE busy, retry the access */
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
                if (unlikely(access & ~old_pte))
                        return 1;
                /* Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_RW)
                        new_pte |= _PAGE_DIRTY;
        } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                          old_pte, new_pte));

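        /*
         * Derive the HPTE pp bits from the Linux PTE: 0x2 grants user
         * read/write, and or-ing in 1 when the PTE is not writable
         * downgrades the mapping to read-only.
         */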
        rflags = 0x2 | (!(new_pte & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

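                /*
                 * _PAGE_F_SECOND records whether the HPTE went into the
                 * secondary hash group and _PAGE_F_GIX holds its slot
                 * index within that group, so the existing entry can be
                 * located again without a search.
                 */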
                hash = hpt_hash(vpn, shift, ssize);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;

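                /*
                 * Ask the back end to relax the pp bits in place.  The
                 * flags argument is passed through so it knows whether a
                 * TLB invalidation is really needed; for a fault that found
                 * no existing HPTE the tlbie can be skipped (see the commit
                 * message above).  A return of -1 means no matching HPTE
                 * was found, so drop the stale slot information and insert
                 * a fresh entry below.
                 */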
                if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
                                         mmu_psize, ssize, flags) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }

        if (likely(!(old_pte & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, shift, ssize);

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

                /* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
                /* Add in WIMG bits */
                rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_COHERENT | _PAGE_GUARDED));
                /*
                 * always enable memory coherence
                 */
                rflags |= HPTE_R_M;

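                /*
                 * Insert the new HPTE.  hpte_insert_repeating() retries
                 * across the primary and secondary hash groups and, if
                 * both are full, evicts an existing entry and tries again.
                 */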
                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);

                /*
                 * Hypervisor failure. Restore old pte and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *ptep = __pte(old_pte);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           mmu_psize, mmu_psize, old_pte);
                        return -1;
                }

                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }

        /*
         * No need to use ldarx/stdcx here: we still hold _PAGE_BUSY, so
         * nobody else can be updating this PTE; a plain store publishes
         * new_pte and clears the busy bit in one go.
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
        return 0;
}