mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-26 13:45:08 +07:00
Commit 64f31d5802
ptep_flush_lazy and pmdp_flush_lazy use mm->context.attach_count to decide between a lazy TLB flush and an immediate TLB flush. The field contains two 16-bit counters: the number of CPUs that have the mm attached and can create TLB entries for it, and the number of CPUs in the middle of a page table update.

The __tlb_flush_asce, ptep_flush_direct and pmdp_flush_direct functions use the attach counter and a mask check with mm_cpumask(mm) to decide between a local flush of the current CPU and a global flush.

For all these functions the decision between lazy vs. immediate and local vs. global TLB flush can be based on CPU masks. There are two masks: mm->context.cpu_attach_mask with the CPUs that are actively using the mm, and mm_cpumask(mm) with the CPUs that have used the mm since the last full flush. The decision between lazy and immediate flush is based on mm->context.cpu_attach_mask; the decision between local and global flush is based on mm_cpumask(mm).

With this patch all checks use the CPU masks. The old counter mm->context.attach_count with its two 16-bit values is turned into a single counter mm->context.flush_count that keeps track of the number of CPUs with incomplete page table updates. The sole user of this counter is finish_arch_post_lock_switch(), which waits for the end of all page table updates.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
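As a rough illustration of the two decisions described in the commit message, here is a minimal user-space sketch, not the kernel implementation: the two masks are modelled as plain bitmaps, and all names below (struct mm_sim, flush_can_be_lazy, flush_can_be_local) are invented for this example. It assumes one plausible reading of the rule: a flush may be deferred only if no other CPU has the mm attached, and a local flush is enough only if no other CPU has used the mm since the last full flush.

/*
 * Illustrative model of the cpumask-based flush decisions.
 * In the kernel the masks are cpumask_t values; here they are plain
 * bitmaps so the sketch compiles and runs on its own.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm_sim {
	unsigned long cpu_attach_mask;	/* CPUs actively using the mm */
	unsigned long cpu_used_mask;	/* mm_cpumask(): CPUs that used the mm
					 * since the last full flush */
};

static bool only_this_cpu(unsigned long mask, int cpu)
{
	return mask == (1UL << cpu);
}

/* Lazy vs. immediate (the ptep_flush_lazy/pmdp_flush_lazy decision):
 * defer the flush only if no other CPU has the mm attached. */
static bool flush_can_be_lazy(const struct mm_sim *mm, int cpu)
{
	return only_this_cpu(mm->cpu_attach_mask, cpu);
}

/* Local vs. global (the __tlb_flush_asce / *_flush_direct decision):
 * a local flush suffices only if no other CPU has used the mm since
 * the last full flush. */
static bool flush_can_be_local(const struct mm_sim *mm, int cpu)
{
	return only_this_cpu(mm->cpu_used_mask, cpu);
}

int main(void)
{
	/* mm attached only on CPU 0, but CPU 2 has used it earlier. */
	struct mm_sim mm = { .cpu_attach_mask = 0x1, .cpu_used_mask = 0x5 };

	printf("lazy ok:  %d\n", flush_can_be_lazy(&mm, 0));	/* 1 */
	printf("local ok: %d\n", flush_can_be_local(&mm, 0));	/* 0 */
	return 0;
}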
45 lines
1.0 KiB
C
#ifndef __MMU_H
#define __MMU_H

#include <linux/cpumask.h>
#include <linux/errno.h>

typedef struct {
	cpumask_t cpu_attach_mask;
	atomic_t flush_count;
	unsigned int flush_mm;
	spinlock_t list_lock;
	struct list_head pgtable_list;
	struct list_head gmap_list;
	unsigned long asce;
	unsigned long asce_limit;
	unsigned long vdso_base;
	/* The mmu context allocates 4K page tables. */
	unsigned int alloc_pgste:1;
	/* The mmu context uses extended page tables. */
	unsigned int has_pgste:1;
	/* The mmu context uses storage keys. */
	unsigned int use_skey:1;
} mm_context_t;

#define INIT_MM_CONTEXT(name)						    \
	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),

static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#endif
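For context, the flush_count field declared in the structure above is, per the commit message, consumed only by finish_arch_post_lock_switch(), which waits for all in-flight page table updates to finish. The following is a hedged, user-space sketch of that counter pattern; the names (ctx_sim, page_table_update, wait_for_updates) are invented for illustration and this is not the kernel code.

/*
 * Sketch of the flush_count pattern: updaters bump the counter around a
 * page table update, and the context-switch path waits for it to reach
 * zero before proceeding.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ctx_sim {
	atomic_int flush_count;	/* CPUs with an incomplete page table update */
};

/* Updater side: mark the update window so others can wait for it to end. */
static void page_table_update(struct ctx_sim *ctx)
{
	atomic_fetch_add(&ctx->flush_count, 1);
	/* ... modify a page table entry, flush or defer the TLB flush ... */
	atomic_fetch_sub(&ctx->flush_count, 1);
}

/* Switch side: spin until no CPU is in the middle of an update, which is
 * the role finish_arch_post_lock_switch() plays per the commit message. */
static void wait_for_updates(struct ctx_sim *ctx)
{
	while (atomic_load(&ctx->flush_count) > 0)
		;	/* the kernel would use cpu_relax() here */
}

int main(void)
{
	struct ctx_sim ctx = { .flush_count = 0 };

	page_table_update(&ctx);
	wait_for_updates(&ctx);
	printf("flush_count = %d\n", atomic_load(&ctx.flush_count));
	return 0;
}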