Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
Commit 60f07c8ec5
The order in __tlb_flush_mm_lazy is to flush the TLB first and then clear the mm->context.flush_mm bit. This can lead to missed flushes, as the bit can be set at any time; the order needs to be the other way around.

But this leads to a different race: __tlb_flush_mm_lazy may be called on two CPUs concurrently. If mm->context.flush_mm is cleared first, then another CPU can bypass __tlb_flush_mm_lazy although the first CPU has not done the flush yet. In a virtualized environment the time until the flush is finally completed can be arbitrarily long.

Add a spinlock to serialize __tlb_flush_mm_lazy and use the function in finish_arch_post_lock_switch as well.

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
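For context, a minimal sketch of the pre-patch helper as the commit message describes it (flush first, clear second) may help; this is a reconstruction from the message, not a quote of the removed code:

/*
 * Pre-patch sketch (reconstructed): the flush runs before the flag is
 * cleared, so a flush_mm bit set between the two statements is wiped
 * out and its flush is lost. Swapping the two lines alone trades this
 * for a second race: if one CPU clears flush_mm first, another CPU
 * sees the bit clear and skips its flush while the first CPU's flush
 * is still pending, which under virtualization can take arbitrarily
 * long to complete. The patched version in the file below closes both
 * races by clearing the bit first, under mm->context.lock.
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);		/* flush first ...		*/
		mm->context.flush_mm = 0;	/* ... then clear the flag	*/
	}
}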
152 lines · 3.5 KiB · C
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile(
		" .insn rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implicates multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()			do { } while (0)
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_page(vma, addr)	do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
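The commit message also mentions switching finish_arch_post_lock_switch over to this helper; that change lands in arch/s390/include/asm/mmu_context.h rather than in this file. A simplified, hypothetical sketch of such a call site, assuming the caller only needs the deferred flush (the real function does more bookkeeping around CPU attach masks):

/*
 * Hypothetical, simplified call site: once the scheduler has dropped
 * the runqueue lock after a context switch, carry out any TLB flush
 * that was deferred for the incoming mm. Going through
 * __tlb_flush_mm_lazy keeps the flush_mm test-and-clear serialized
 * under mm->context.lock, so a concurrent caller on another CPU
 * cannot skip a flush that is still in flight.
 */
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm)
		__tlb_flush_mm_lazy(mm);
}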