linux_dsm_epyc7002/arch/m68k/68360/entry.S

/*
 * entry.S - non-mmu 68360 interrupt and exceptions entry points
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 SED Systems, a Division of Calian Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 * M68360 Port by SED Systems, and Lineo.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl bad_interrupt
.globl inthandler

badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_exception

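/*
 * do_trace: syscall entry when TIF_SYSCALL_TRACE is set. D0 in the saved
 * frame is preloaded with -ENOSYS so a tracer that cancels the call still
 * sees a sane return value; syscall_trace_enter/syscall_trace_leave bracket
 * the dispatch through sys_call_table.
 */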
do_trace:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	/* needed for strace*/
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d1
	movel	#-ENOSYS,%d0
	cmpl	#NR_syscalls,%d1
	jcc	1f
	lsl	#2,%d1
	lea	sys_call_table,%a0
	movel	%a0@(%d1),%a0		/* fetch the handler pointer from the table */
	jbsr	%a0@			/* ... and call it */

1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave

ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	ret_from_exception

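/*
 * System call entry point. SAVE_ALL_SYS builds the pt_regs frame, set_esp0
 * records the top of that frame (used later for signal delivery), then the
 * syscall number from orig_d0 is bounds-checked and dispatched through
 * sys_call_table, or diverted to do_trace when the caller is being traced.
 */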
ENTRY(system_call)
	SAVE_ALL_SYS

	/* save top of frame*/
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	movel	%sp@(PT_OFF_ORIG_D0),%d0
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
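	/*
	 * btst on a memory operand tests a bit within a single byte, so the
	 * 32-bit thread_info->flags word is addressed byte-wise here: on the
	 * big-endian 68k, bit TIF_SYSCALL_TRACE lives in byte
	 * (31-TIF_SYSCALL_TRACE)/8 of the word, at bit TIF_SYSCALL_TRACE%8.
	 */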
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	jne	do_trace
	cmpl	#NR_syscalls,%d0
	jcc	badsys
	lsl	#2,%d0
	lea	sys_call_table,%a0
	movel	%a0@(%d0),%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/

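/*
 * Common return path for system calls, exceptions and interrupts: the saved
 * status register tells us whether we are going back to kernel or user
 * context.
 */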
ret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel*/
	jeq	Luser_return		/* if not, returning to user: check resched, signals*/
Lkernel_return:
	RESTORE_ALL

Luser_return:
	/* only allow interrupts when we are really the last one on the*/
	/* kernel stack, otherwise stack overflow can occur during*/
	/* heavy interrupt load*/
	andw	#ALLOWINT,%sr

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
1:
	move	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	jne	Lwork_to_do
	RESTORE_ALL
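
	/*
	 * Pending work: reschedule if TIF_NEED_RESCHED is set, otherwise
	 * deliver signals / other notifications, then re-check the flags.
	 */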
Lwork_to_do:
	movel	%a2@(TINFO_FLAGS),%d1	/* thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

Lsignal_return:
	subql	#4,%sp			/* dummy return address*/
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrw	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	1b

/*
 * This is the main interrupt handler, responsible for calling do_IRQ()
 */
inthandler:
	SAVE_ALL_INT
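	/*
	 * The low bits of the exception frame's format/vector word hold the
	 * vector offset (vector number * 4); mask off the format bits and
	 * shift right by 2 to recover the vector number passed to do_IRQ().
	 */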
	movew	%sp@(PT_OFF_FORMATVEC),%d0
	and.l	#0x3ff,%d0
	lsr.l	#0x02,%d0
	movel	%sp,%sp@-
	movel	%d0,%sp@-		/* put vector # on stack*/
	jbsr	do_IRQ			/* process the IRQ */
	addql	#8,%sp			/* pop parameters off stack*/
	jra	ret_from_exception

/*
 * Handler for uninitialized and spurious interrupts.
 */
bad_interrupt:
	addql	#1,irq_err_count
	rte

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
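/*
 * resume() switches kernel context: it saves prev's status register,
 * kernel and user stack pointers and the switch_stack registers, then
 * loads the same state for next and returns on next's kernel stack.
 */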
ENTRY(resume)
	movel	%a0,%d1				/* save prev thread in d1 */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
	movel	%usp,%a3			/* save usp */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP)

	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
	movel	%a3,%usp
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
	rts