mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-26 00:45:18 +07:00)
17a68777bc
Clean up struct cpuinfo_csky and struct thread_struct, and remove all esp0-related code. pt_regs can now be obtained from sp, and backtrace can use fp in switch_stack.
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
210 lines
4.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>
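
/*
 * fixup_exception() - recover from a fault in kernel code that has an
 * exception-table entry.  The faulting program counter is looked up in the
 * kernel's exception tables; if an entry is found, execution is redirected
 * to its fixup address (->nextinsn) and the fault is considered handled.
 * Returns 1 if a fixup was applied, 0 otherwise.
 */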
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;

		return 1;
	}

	return 0;
}
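/*
 * Note: the entries searched above are normally emitted by the uaccess
 * helpers (get_user()/put_user()/copy_{to,from}_user() and friends), which
 * pair each potentially faulting user access with a fixup landing pad.
 * fixup_exception() is what turns a faulting kernel-mode access to user
 * memory into an -EFAULT style failure instead of an oops.
 */
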
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
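/*
 * @write is nonzero when the faulting access was a write.  @mmu_meh appears
 * to be a snapshot of the C-SKY MEH ("MMU entry high") register taken by the
 * entry code; it carries the faulting virtual address, so masking it with
 * PAGE_MASK below recovers the page-aligned fault address.
 */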
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	int fault;
	unsigned long address = mmu_meh & PAGE_MASK;

	si_code = SEGV_MAPERR;

#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
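		/*
		 * Only the top-level entries are copied here: set_pgd()/
		 * set_pmd() make this task's tables point at the same
		 * lower-level tables as init_mm, and the final
		 * pte_present() check merely confirms that the kernel
		 * mapping really exists before retrying the access.
		 */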
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		unsigned long pgd_base;

		/*
		 * Use the page-directory base the hardware is currently
		 * walking, not tsk's (see the note above).
		 */
		pgd_base = tlb_get_pgd();
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
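	/*
	 * in_atomic() covers faults taken from interrupt or other atomic
	 * context, and !mm covers kernel threads.  In either case we must
	 * not sleep on mmap_sem, so such a fault goes straight to
	 * bad_area_nosemaphore (SIGSEGV for user mode, exception-table
	 * fixup otherwise).
	 */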
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
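	/*
	 * Fault accounting: a "major" fault is one that required I/O to
	 * bring the page in; everything else counts as a minor fault.
	 */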
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

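	/*
	 * bad_area_nosemaphore is entered directly when mmap_sem was never
	 * taken (atomic context or no user mm); the bad_area label above
	 * drops the semaphore first and then falls through to here.
	 */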
bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
	die_if_kernel("Oops", regs, write);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
}