Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-25 10:49:25 +07:00)
2923f5ea77
This patch includes the exception/interrupt entries, the pt_regs structure, and the related accessors.

Unaligned access handling:

Andes processors cannot load/store data that is not naturally aligned on the bus; for example, a 4-byte load requires a start address divisible by 4. When an unaligned access occurs, a data-unaligned exception is raised and the user gets a SIGSEGV or a kernel oops, depending on the faulting address. To let users load/store data at unaligned addresses, software load/store emulation is implemented in arch/nds32/mm/alignment.c to handle the data-unaligned exception.

Unaligned access handling is disabled by default because it is not the normal case. It can be enabled as follows (a user-space sketch follows this message):

A. Compile time:
   1. Enable the kernel config option CONFIG_ALIGNMENT_TRAP.

B. Run time:
   1. Enter the /proc/sys/nds32/unaligned_acess directory.
   2. Write 1 to the file enable_mode to enable unaligned access handling;
      write 0 to disable it.
   3. Write 1 to the file debug to report which unaligned address is being
      handled; write 0 to disable it.

However, the unaligned access handler cannot work if the unaligned address is not accessible, e.g. because of a protection violation. In that case, the default behavior for the data-unaligned exception still applies.

Signed-off-by: Vincent Chen <vincentc@andestech.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
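The run-time steps above amount to writing "1" into two sysctl files. As a minimal illustrative sketch (not part of the patch), a user-space helper could do this as shown below; the directory and file names are taken verbatim from the commit message, so treat them as assumptions in case the actual sysctl names in arch/nds32/mm/alignment.c differ.

/*
 * Illustrative sketch only: enable unaligned access handling at run time
 * by writing "1" to the sysctl files named in the commit message above.
 * The paths are assumptions copied from the commit message text.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_sysctl(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(value, f);
	fclose(f);
}

int main(void)
{
	/* Enable software emulation of unaligned loads/stores. */
	write_sysctl("/proc/sys/nds32/unaligned_acess/enable_mode", "1");
	/* Optionally report each unaligned address being handled. */
	write_sysctl("/proc/sys/nds32/unaligned_acess/debug", "1");
	return 0;
}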
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

/* Record a stack trace for the current task. */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *fpn;
	int skip = trace->skip;
	int savesched;

	if (tsk == current) {
		/* Read the current frame pointer ($fp) directly. */
		__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
		savesched = 1;
	} else {
		/* For a sleeping task, start from its saved frame pointer. */
		fpn = (unsigned long *)thread_saved_fp(tsk);
		savesched = 0;
	}

	/*
	 * Walk the frame-pointer chain while the frame pointer stays
	 * word-aligned and points into kernel space.
	 */
	while (!kstack_end(fpn) && !((unsigned long)fpn & 0x3)
	       && (fpn >= (unsigned long *)TASK_SIZE)) {
		unsigned long lpp, fpp;

		lpp = fpn[-1];
		fpp = fpn[FP_OFFSET];
		if (!__kernel_text_address(lpp))
			break;

		/* Skip scheduler internals unless tracing the current task. */
		if (savesched || !in_sched_functions(lpp)) {
			if (skip) {
				skip--;
			} else {
				trace->entries[trace->nr_entries++] = lpp;
				if (trace->nr_entries >= trace->max_entries)
					break;
			}
		}
		fpn = (unsigned long *)fpp;
	}
}
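For context, a caller of this API fills in a struct stack_trace with a caller-provided entries buffer and then reads back nr_entries return addresses. Below is a minimal, hypothetical in-kernel usage sketch, assuming the pre-5.2 struct stack_trace interface that this file implements; dump_current_stack and MAX_TRACE_DEPTH are illustrative names, not part of the file above.

/*
 * Usage sketch only: capture and print a stack trace of the current
 * task via save_stack_trace(). Names here are illustrative.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define MAX_TRACE_DEPTH 16

static void dump_current_stack(void)
{
	unsigned long entries[MAX_TRACE_DEPTH];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= MAX_TRACE_DEPTH,
		.nr_entries	= 0,
		.skip		= 0,
	};
	unsigned int i;

	save_stack_trace(&trace);

	/* Print each recorded return address with its symbol name. */
	for (i = 0; i < trace.nr_entries; i++)
		pr_info("  [<%p>] %pS\n",
			(void *)entries[i], (void *)entries[i]);
}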