arm64: tlbflush: Ensure start/end of address range are aligned to stride
Since commit 3d65b6bbc0 ("arm64: tlbi: Set MAX_TLBI_OPS to
PTRS_PER_PTE"), we resort to per-ASID invalidation when attempting to
perform more than PTRS_PER_PTE invalidation instructions in a single
call to __flush_tlb_range(). Whilst this is beneficial, the mmu_gather
code does not ensure that the end address of the range is rounded up
to the stride when freeing intermediate page tables in pXX_free_tlb(),
which defeats our range checking.

Align the bounds passed into __flush_tlb_range().
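As a minimal sketch of what the rounding buys us (userspace C with
hypothetical values; the round_down()/round_up() macros below are
simplified stand-ins for the kernel's, valid only for power-of-two
strides):

#include <stdio.h>

/* Simplified stand-ins for the kernel's round_down()/round_up();
 * correct only when the stride is a power of two. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	unsigned long stride = 0x200000;	/* 2 MiB, e.g. PMD_SIZE with 4K pages */
	unsigned long start  = 0x200000;	/* already stride-aligned */
	unsigned long end    = 0x401000;	/* 4 KiB past a stride boundary */

	/* Unaligned: the range is not a whole number of strides, so a
	 * check against MAX_TLBI_OPS * stride can be off by a partial
	 * step.  Prints 0x201000. */
	printf("raw:     %#lx bytes\n", end - start);

	start = round_down(start, stride);
	end = round_up(end, stride);

	/* Aligned: prints 0x400000, i.e. exactly two strides. */
	printf("aligned: %#lx bytes\n", end - start);
	return 0;
}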
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reported-by: Hanjun Guo <guohanjun@huawei.com>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent fa63da2ab0
commit 01d57485fc
@@ -195,6 +195,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
 
+	start = round_down(start, stride);
+	end = round_up(end, stride);
+
 	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
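Rounding start down and end up can only widen the flushed range, so
the worst case is over-invalidating at most one extra stride at each
end, which is harmless; leaving the bounds unaligned instead risks the
loop's stride-sized steps and the MAX_TLBI_OPS comparison disagreeing
about how many entries the range actually covers.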