Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-28 11:18:45 +07:00
Commit 6d8278c414
In the case of a spurious fault (which can happen due to a race with another thread that changes the page table), the default Linux mm code calls flush_tlb_page for that address. This is not required because the pte will be re-fetched. Hash does not wire this up to a hardware TLB flush for this reason. This patch avoids the flush for radix.

From Power ISA v3.0B, p.1090:

    Setting a Reference or Change Bit or Upgrading Access Authority
    (PTE Subject to Atomic Hardware Updates)

    If the only change being made to a valid PTE that is subject to
    atomic hardware updates is to set the Reference or Change bit to 1
    or to add access authorities, a simpler sequence suffices because
    the translation hardware will refetch the PTE if an access is
    attempted for which the only problems were reference and/or change
    bits needing to be set or insufficient access authority.

The nest MMU on POWER9 does not re-fetch the PTE after such an access attempt before faulting, so address spaces with a coprocessor attached will continue to flush in these cases.

This reduces tlbies for a kernel compile workload from 0.95M to 0.90M.

fork --fork --exec benchmark improved 0.5% (12300->12400).

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
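For reference, the generic fallback that arch code overrides here is an unconditional page flush; in kernels of this era it is defined in include/asm-generic/pgtable.h roughly as:

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

The flush_tlb_fix_spurious_fault() override near the bottom of this header replaces that with a flush only when a coprocessor is attached to the address space.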
166 lines
4.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

#ifdef CONFIG_PPC_NATIVE
static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled and implementations use
	 * early_cpu_has_feature etc because that works early in boot
	 * and this is the machine check path which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
#else
static inline void tlbiel_all(void) { BUG(); };
#endif

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
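For context, tlbiel_all() is invoked from the host machine check handler; in kernels of this era arch/powerpc/kernel/mce_power.c does roughly the following (a sketch, not verbatim source):

	/* sketch: mce_flush() invalidating all local TLB entries */
	if (what == MCE_FLUSH_TLB) {
		tlbiel_all();
		return 1;
	}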


#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}
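Every dispatcher in this header follows the same shape: radix_enabled() selects the radix implementation at runtime, otherwise the hash fallback is used. A typical generic caller of flush_tlb_range() is mm/mremap.c:move_page_tables(), which in kernels of this era flushes the source range after moving PTEs roughly like this (a sketch, not verbatim source):

	/* sketch: flush the old range once PTEs have been moved away */
	if (need_flush)
		flush_tlb_range(vma, old_end - len, old_end);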

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}
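tlb_flush() is the arch hook fired at the end of an mmu_gather sequence. The generic unmap path drives it roughly as below (a sketch using the mmu_gather API of this era; exact signatures vary across kernel versions):

	/* sketch: generic unmap flow that ends in tlb_flush() above */
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* begin batching */
	unmap_vmas(&tlb, vma, start, end);	/* clear PTEs, gather pages */
	tlb_finish_mmu(&tlb, start, end);	/* flushes via tlb_flush() */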

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/* See ptep_set_access_flags comment */
	if (atomic_read(&vma->vm_mm->context.copros) > 0)
		flush_tlb_page(vma, address);
}
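This override is what implements the commit above: only address spaces with a coprocessor (nest MMU) attached still flush on a spurious fault. The generic call site sits in the pte fault path of mm/memory.c and, in kernels of this era, looks roughly like this (a sketch, not verbatim source):

	/* sketch: spurious-fault handling in handle_pte_fault() */
	if (vmf->flags & FAULT_FLAG_WRITE)
		flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);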

/*
 * flush the page walk cache for the address
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. We already
	 * have marked the upper/higher level page table entry none by now.
	 * So it is safe to flush PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
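For context, this arch's page-table free hooks call flush_tlb_pgtable() when a table page is handed to the mmu_gather; a sketch modelled on arch/powerpc/include/asm/book3s/64/pgalloc.h of this era (the surrounding helper and its third argument are approximate):

	/* sketch: __pte_free_tlb() flushes the walk cache, then frees */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);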
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */