Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-13 00:16:40 +07:00), commit 26ff6c11ef:
"Cleanup of page table allocators, using generic folded PMD and PUD helpers. TLB flushing operations are moved to a more sensible spot. The page fault handler is also optimized slightly; we no longer waste cycles on IRQ disabling for flushing of the page from the ITLB, since we're already under CLI protection by the initial exception handler."
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
133 lines, 3.0 KiB, C
/*
 * TLB flushing operations for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/*
 * Flush the TLB entry for a single page in @vma's address space.
 *
 * If the mm has no hardware context yet (NO_CONTEXT), there is nothing
 * to flush: no entries tagged with an ASID for this mm can exist.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			/*
			 * Flushing on behalf of another mm: temporarily
			 * switch the hardware ASID so the lookup matches
			 * that mm's entries; restored below.
			 */
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}
|
|
|
|
/*
 * Flush all TLB entries mapping the user virtual address range
 * [start, end) belonging to @vma's mm.
 *
 * Cheap heuristic: when the range covers more than a quarter of the
 * TLB, per-page flushing costs more than it saves, so instead the mm's
 * context is dropped entirely — a new ASID gets assigned on the next
 * activation, making every old entry unreachable.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			/* Round the range out to page boundaries. */
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				/*
				 * Not our mm: switch to its ASID for the
				 * duration of the flush, restore after.
				 */
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}
|
|
|
|
/*
 * Flush all TLB entries mapping the kernel virtual address range
 * [start, end), i.e. entries tagged with init_mm's ASID.
 *
 * Same quarter-of-the-TLB heuristic as flush_tlb_range(): above that
 * threshold it is cheaper to blow away the whole TLB.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
		/* Always switch to the kernel ASID; restored below. */
		unsigned long saved_asid = get_asid();

		/* Round the range out to page boundaries. */
		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}
|
|
|
|
void flush_tlb_mm(struct mm_struct *mm)
|
|
{
|
|
/* Invalidate all TLB of this process. */
|
|
/* Instead of invalidating each TLB, we get new MMU context. */
|
|
if (mm->context != NO_CONTEXT) {
|
|
unsigned long flags;
|
|
|
|
local_irq_save(flags);
|
|
mm->context = NO_CONTEXT;
|
|
if (mm == current->mm)
|
|
activate_context(mm);
|
|
local_irq_restore(flags);
|
|
}
|
|
}
|
|
|
|
/*
 * Flush the entire TLB by setting the flush/invalidate bit in the MMU
 * control register.
 */
void flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
	 * TF-bit for SH-3, TI-bit for SH-4.
	 * It's same position, bit #2.
	 */
	local_irq_save(flags);
	status = ctrl_inl(MMUCR);	/* read-modify-write of MMUCR */
	status |= 0x04;			/* bit 2: TF (SH-3) / TI (SH-4) */
	ctrl_outl(status, MMUCR);
	ctrl_barrier();			/* ensure the write takes effect */
	local_irq_restore(flags);
}
|