powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
Some architectures, like ppc64, need to do special things when flushing
the TLB for hugepages. Add a new helper to flush a hugetlb TLB range.
This helps us avoid flushing the entire TLB mapping for the PID.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 5491ae7b6f
parent fbfa26d854
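The mechanics are a common kernel pattern: generic code supplies a default, and an architecture opts out by defining a feature macro before the generic definition is seen. Below is a minimal standalone sketch of that protocol, reusing the names from the diff that follows; the stub type and the function bodies are invented for illustration only.

/*
 * Sketch of the override protocol this commit introduces. If an arch
 * header defines __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE (and supplies its
 * own flush_hugetlb_tlb_range()), the generic fallback in mm/hugetlb.c
 * is skipped; otherwise hugetlb ranges are flushed like any other range.
 * The type and prototype below are stubbed so the sketch stands alone.
 */
struct vm_area_struct;                  /* stand-in for the kernel type */
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end);

/* --- arch header (powerpc's tlbflush.h after this commit) --- */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        /* arch-specific: flush only hugepage-size TLB entries */
}

/* --- generic code (mm/hugetlb.c after this commit) --- */
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

Using a macro alias rather than, say, a weak symbol keeps the fallback zero-cost: with no arch override, flush_hugetlb_tlb_range() compiles to a plain flush_tlb_range() call.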
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h

@@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize)
 	return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 					 unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
arch/powerpc/include/asm/book3s/64/tlbflush.h

@@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
 	return hash__flush_tlb_range(vma, start, end);
 }
 
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
arch/powerpc/mm/hugetlbpage-radix.c

@@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				    unsigned long end)
+{
+	int psize;
+	struct hstate *hstate = hstate_file(vma->vm_file);
+
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
 /*
  * A variant of hugetlb_get_unmapped_area doing topdown search
  * FIXME!! should we do as x86 does or non hugetlb area does ?
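radix__flush_hugetlb_tlb_range() gets its page-size index from the VMA's hstate, which is what lets the flush target only entries of the hugepage size instead of every translation for the PID. For reference, here is a hedged sketch of what hstate_get_psize() does on this platform; the real body lives in the powerpc book3s-64 headers, so treat this as an approximation rather than a quote.

/* Approximate shape of hstate_get_psize() on powerpc book3s-64: map the
 * hugepage shift recorded in the hstate to an MMU page-size index, so
 * the range flush can invalidate entries of just that size. */
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift = huge_page_shift(hstate);

	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	WARN(1, "Wrong huge page shift\n");
	return mmu_virtual_psize;	/* sane fallback */
}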
mm/hugetlb.c
@@ -3938,6 +3938,14 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
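The one-line change above is the payoff: mprotect() on a hugetlb mapping reaches hugetlb_change_protection(), and on radix the flush now invalidates only the hugepage-size entries for the range. A hedged userspace sketch that exercises this path follows; mmap(), mprotect(), and MAP_HUGETLB are standard Linux API, while the 2 MB size assumes a configured hugepage pool (e.g. via /proc/sys/vm/nr_hugepages).

#define _GNU_SOURCE		/* for MAP_HUGETLB on glibc */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;	/* one 2 MB hugepage, assuming that pool size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* no hugepages reserved? */
		return 1;
	}
	memset(p, 0, len);			/* fault the hugepage in */

	/* Drop write permission: this walks hugetlb_change_protection()
	 * and triggers the new flush_hugetlb_tlb_range() call. */
	if (mprotect(p, len, PROT_READ))
		perror("mprotect");

	munmap(p, len);
	return 0;
}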