Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2025-01-17 21:56:16 +07:00
d4748276ae
There are several cases outside the normal address space management
where a CPU's entire local TLB is to be flushed:

  1. Booting the kernel, in case something has left stale entries in
     the TLB (e.g., kexec).

  2. Machine check, to clean corrupted TLB entries.

One other place where the TLB is flushed is waking from deep idle
states. The flush is a side-effect of calling ->cpu_restore with the
intention of re-setting various SPRs. The flush itself is unnecessary
because, in the first case, the TLB should not acquire new corrupted
entries as part of sleep/wake (though they may be lost).

This type of TLB flush is coded inflexibly, several times for each
CPU type, and the implementations have a number of problems with
ISA v3.0B:

- The current radix mode of the MMU is not taken into account; it is
  always done as a hash flush. For IS=2 (LPID-matching flush from the
  host) and IS=3 with HV=0 (guest kernel flush), tlbie(l) is undefined
  if the R field does not match the current radix mode.

- ISA v3.0B hash must flush the partition and process table caches as
  well.

- ISA v3.0B radix must flush partition and process scoped translations,
  partition and process table caches, and also the page walk cache.

So consolidate the flushing code and implement it in C and inline asm
under the mm/ directory with the rest of the flush code. Add ISA v3.0B
cases for radix and hash, and use the radix flush in a radix
environment. Provide a way for IS=2 (LPID flush) to specify the radix
mode of the partition. Have KVM pass in the radix mode of the guest.

Take out the flushes from the early cputable/dt_cpu_ftrs detection
hooks and move them later in the boot process, after the MMU registers
are set up and before relocation is first turned on.

The TLB flush is no longer called when restoring from deep idle states.
This was not done as a separate step because booting secondaries uses
the same cpu_restore as idle restore, which needs the TLB flush.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
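The consolidation the message describes boils down to one dispatch point that picks the hash or radix flavour of the local flush, plus an LPID variant that takes the partition's radix mode from the caller (e.g. KVM). Below is a minimal sketch of that shape. hash__tlbiel_all() is the hook declared in the header further down; radix__tlbiel_all(), early_radix_enabled() and the TLB_INVAL_SCOPE_* action codes are assumed names used here for illustration, since the message does not spell them out.

/* Illustrative sketch of the consolidated local TLB flush dispatch.
 * Only hash__tlbiel_all() is declared in this header; the other names
 * are assumptions for the purpose of the sketch.
 */
enum {
        TLB_INVAL_SCOPE_GLOBAL = 0,     /* invalidate all translations */
        TLB_INVAL_SCOPE_LPID   = 1,     /* invalidate only the current LPID */
};

/* Boot and host machine check: flush everything this CPU may hold. */
static inline void tlbiel_all(void)
{
        if (early_radix_enabled())
                radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
        else
                hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

/* IS=2 (LPID) flush: the caller supplies the guest's radix mode. */
static inline void tlbiel_all_lpid(bool radix)
{
        if (radix)
                radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
        else
                hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}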
121 lines · 3.0 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
        int                     active;
        unsigned long           index;
        struct mm_struct        *mm;
        real_pte_t              pte[PPC64_TLB_BATCH_NR];
        unsigned long           vpn[PPC64_TLB_BATCH_NR];
        unsigned int            psize;
        int                     ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch;

        if (radix_enabled())
                return;
        batch = this_cpu_ptr(&ppc64_tlb_batch);
        batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch;

        if (radix_enabled())
                return;
        batch = this_cpu_ptr(&ppc64_tlb_batch);

        if (batch->index)
                __flush_tlb_pending(batch);
        batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)

extern void hash__tlbiel_all(unsigned int action);

extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
                            int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
                                pmd_t *pmdp, unsigned int psize, int ssize,
                                unsigned long flags);
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
        /*
         * There's no Page Walk Cache for hash, so what is needed is
         * the same as flush_tlb_mm(), which doesn't really make sense
         * with hash. So the only thing we could do is flush the
         * entire LPID! Punt for now, as it's not being used.
         */
        WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
        /*
         * There's no Page Walk Cache for hash, so what is needed is
         * the same as flush_tlb_mm(), which doesn't really make sense
         * with hash. So the only thing we could do is flush the
         * entire LPID! Punt for now, as it's not being used.
         */
        WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
                                              unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
                                         unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
                                                unsigned long end)
{
}

struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                                     unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
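The arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() pair above implements the generic lazy-MMU batching contract: invalidations for hash PTE updates made between the two calls may be queued in the per-CPU ppc64_tlb_batch, and the queue is drained by __flush_tlb_pending() on leave. A minimal usage sketch follows; the caller-side variables (mm, ptep, start, end) are hypothetical and not taken from this header.

/* Illustrative caller of the lazy MMU mode hooks; variables stand in
 * for whatever core mm code supplies.
 */
arch_enter_lazy_mmu_mode();                     /* start batching on this CPU */
for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
        pte_clear(mm, addr, ptep);              /* invalidation may be queued */
arch_leave_lazy_mmu_mode();                     /* drains any queued batch */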