/*
 * linux/arch/nios2/kernel/entry.S
 *
 * Copyright (C) 2013-2014  Altera Corporation
 * Copyright (C) 2009, Wind River Systems Inc
 *
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 * Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                     Kenneth Albanowski <kjahds@kjahds.com>,
 * Copyright (C) 2000  Lineo Inc. (www.lineo.com)
 * Copyright (C) 2004  Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/unistd.h>
#include <asm/processor.h>

.macro GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
	andhi	\reg, sp, %hi(~(THREAD_SIZE-1))
.else
	addi	\reg, r0, %lo(~(THREAD_SIZE-1))
	and	\reg, \reg, sp
.endif
.endm

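/*
 * Illustrative sketch (not part of the kernel build): GET_THREAD_INFO
 * relies on struct thread_info living at the bottom of the THREAD_SIZE-
 * aligned kernel stack, so masking the stack pointer recovers it. The asm
 * splits the mask into %hi/%lo halves only because Nios II immediates are
 * 16 bits wide. In C the same computation is simply:
 *
 *	#include <stdint.h>
 *
 *	#define THREAD_SIZE 8192	// assumed; the real value is per-config
 *
 *	static inline uintptr_t thread_info_of(uintptr_t sp)
 *	{
 *		// clear the low log2(THREAD_SIZE) bits of the stack pointer
 *		return sp & ~(uintptr_t)(THREAD_SIZE - 1);
 *	}
 */
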
.macro kuser_cmpxchg_check
	/*
	 * Make sure our user space atomic helper is restarted if it was
	 * interrupted in a critical region.
	 * ea-4 = address of interrupted insn (ea must be preserved).
	 * sp = saved regs.
	 * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
	 * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
	 * cmpxchg_ldw + 4.
	 */
	/* et = cmpxchg_stw + 4 */
	movui	et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
	bgtu	ea, et, 1f

	subi	et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
	bltu	ea, et, 1f
	stw	et, PT_EA(sp)	/* fix up EA */
	mov	ea, et
1:
.endm

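/*
 * Illustrative sketch (not part of the kernel build): the window check
 * above, expressed in C. The saved EA points just past the interrupted
 * instruction; if it lies within the user-space mapping of the cmpxchg
 * critical region, it is rewound to cmpxchg_ldw + 4 so the helper is
 * restarted at its load once the interrupt path's return-address
 * adjustment is applied:
 *
 *	extern char __kuser_helper_start[], cmpxchg_ldw[], cmpxchg_stw[];
 *	#define KUSER_BASE 0x1000UL	// user mapping base, per "@ 0x1000" below
 *
 *	static void fix_saved_ea(unsigned long *saved_ea)
 *	{
 *		unsigned long ldw = KUSER_BASE + (cmpxchg_ldw - __kuser_helper_start);
 *		unsigned long stw = KUSER_BASE + (cmpxchg_stw - __kuser_helper_start);
 *
 *		if (*saved_ea >= ldw + 4 && *saved_ea <= stw + 4)
 *			*saved_ea = ldw + 4;	// restart from the load
 *	}
 */
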
.section .rodata
.align 4
exception_table:
	.word	unhandled_exception		/* 0 - Reset */
	.word	unhandled_exception		/* 1 - Processor-only Reset */
	.word	external_interrupt		/* 2 - Interrupt */
	.word	handle_trap			/* 3 - Trap Instruction */

	.word	instruction_trap		/* 4 - Unimplemented instruction */
	.word	handle_illegal			/* 5 - Illegal instruction */
	.word	handle_unaligned		/* 6 - Misaligned data access */
	.word	handle_unaligned		/* 7 - Misaligned destination address */

	.word	handle_diverror			/* 8 - Division error */
	.word	protection_exception_ba		/* 9 - Supervisor-only instr. address */
	.word	protection_exception_instr	/* 10 - Supervisor only instruction */
	.word	protection_exception_ba		/* 11 - Supervisor only data address */

	.word	unhandled_exception		/* 12 - Double TLB miss (data) */
	.word	protection_exception_pte	/* 13 - TLB permission violation (x) */
	.word	protection_exception_pte	/* 14 - TLB permission violation (r) */
	.word	protection_exception_pte	/* 15 - TLB permission violation (w) */

	.word	unhandled_exception		/* 16 - MPU region violation */

trap_table:
	.word	handle_system_call	/* 0  */
	.word	handle_trap_1		/* 1  */
	.word	handle_trap_2		/* 2  */
	.word	handle_trap_3		/* 3  */
	.word	handle_trap_reserved	/* 4  */
	.word	handle_trap_reserved	/* 5  */
	.word	handle_trap_reserved	/* 6  */
	.word	handle_trap_reserved	/* 7  */
	.word	handle_trap_reserved	/* 8  */
	.word	handle_trap_reserved	/* 9  */
	.word	handle_trap_reserved	/* 10 */
	.word	handle_trap_reserved	/* 11 */
	.word	handle_trap_reserved	/* 12 */
	.word	handle_trap_reserved	/* 13 */
	.word	handle_trap_reserved	/* 14 */
	.word	handle_trap_reserved	/* 15 */
	.word	handle_trap_reserved	/* 16 */
	.word	handle_trap_reserved	/* 17 */
	.word	handle_trap_reserved	/* 18 */
	.word	handle_trap_reserved	/* 19 */
	.word	handle_trap_reserved	/* 20 */
	.word	handle_trap_reserved	/* 21 */
	.word	handle_trap_reserved	/* 22 */
	.word	handle_trap_reserved	/* 23 */
	.word	handle_trap_reserved	/* 24 */
	.word	handle_trap_reserved	/* 25 */
	.word	handle_trap_reserved	/* 26 */
	.word	handle_trap_reserved	/* 27 */
	.word	handle_trap_reserved	/* 28 */
	.word	handle_trap_reserved	/* 29 */
#ifdef CONFIG_KGDB
	.word	handle_kgdb_breakpoint	/* 30 KGDB breakpoint */
#else
	.word	instruction_trap	/* 30 */
#endif
	.word	handle_breakpoint	/* 31 */

	.text
	.set noat
	.set nobreak

ENTRY(inthandler)
	SAVE_ALL

	kuser_cmpxchg_check

	/* Clear the EH bit before we get a new exception in the kernel
	 * and after we have saved it to the exception frame. This is done
	 * whether it's trap, tlb-miss or interrupt. If we don't do this,
	 * estatus is not updated on the next exception.
	 */
	rdctl	r24, status
	movi	r9, %lo(~STATUS_EH)
	and	r24, r24, r9
	wrctl	status, r24

	/* Read cause and vector and branch to the associated handler */
	mov	r4, sp
	rdctl	r5, exception
	movia	r9, exception_table
	add	r24, r9, r5
	ldw	r24, 0(r24)
	jmp	r24

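/*
 * Illustrative sketch (not part of the kernel build): the dispatch above
 * in C. The exception control register holds the cause pre-shifted, so
 * its value can be used directly as a byte offset into the word-sized
 * exception_table (hence no extra shift before the add):
 *
 *	typedef void (*handler_t)(void *frame, unsigned long cause);
 *	extern handler_t exception_table[];
 *
 *	static void dispatch(void *frame, unsigned long exception_reg)
 *	{
 *		// exception_reg == cause * 4, an index pre-scaled to bytes
 *		handler_t h = exception_table[exception_reg / 4];
 *		h(frame, exception_reg);
 *	}
 */
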
/***********************************************************************
 * Handle traps
 ***********************************************************************
 */
ENTRY(handle_trap)
	ldwio	r24, -4(ea)	/* instruction that caused the exception */
	srli	r24, r24, 4
	andi	r24, r24, 0x7c
	movia	r9, trap_table
	add	r24, r24, r9
	ldw	r24, 0(r24)
	jmp	r24

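/*
 * Illustrative sketch (not part of the kernel build): the shift/mask pair
 * above extracts the trap instruction's 5-bit immediate (bits 10..6 of
 * the encoding) already multiplied by 4, i.e. a byte offset into the
 * word-sized trap_table:
 *
 *	static unsigned int trap_table_offset(unsigned int insn)
 *	{
 *		// ((insn >> 4) & 0x7c) == ((insn >> 6) & 0x1f) * 4
 *		return (insn >> 4) & 0x7c;
 *	}
 *
 * e.g. "trap 0" (a system call) yields offset 0, selecting
 * handle_system_call.
 */
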
/***********************************************************************
 * Handle system calls
 ***********************************************************************
 */
ENTRY(handle_system_call)
	/* Enable interrupts */
	rdctl	r10, status
	ori	r10, r10, STATUS_PIE
	wrctl	status, r10

	/* Reload registers destroyed by common code. */
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)

local_restart:
	/* Check that the requested system call is within limits */
	movui	r1, __NR_syscalls
	bgeu	r2, r1, ret_invsyscall
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)
	beq	r1, r0, ret_invsyscall

	/* Check if we are being traced */
	GET_THREAD_INFO r11
	ldw	r11, TI_FLAGS(r11)
	BTBNZ	r11, r11, TIF_SYSCALL_TRACE, traced_system_call

	/* Execute the system call */
	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code.
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra, to avoid destroying registers.
	 */
translate_rc_and_ret:
	movi	r1, 0
	bge	r2, zero, 3f
	sub	r2, zero, r2
	movi	r1, 1
3:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret:

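/*
 * Illustrative sketch (not part of the kernel build): the r2/r7 return
 * convention above as user space sees it. A libc syscall stub (names here
 * are hypothetical) turns the pair back into the usual -1/errno shape:
 *
 *	#include <errno.h>
 *
 *	long syscall_return(long r2, long r7)
 *	{
 *		if (r7) {		// kernel flagged an error
 *			errno = r2;	// r2 holds the positive error code
 *			return -1;
 *		}
 *		return r2;		// success: plain result value
 *	}
 */
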
ret_from_exception:
	ldw	r1, PT_ESTATUS(sp)	/* check where we are returning to */
	/* if returning to user mode, check for pending work first */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

restore_all:
	rdctl	r10, status		/* disable intrs */
	andi	r10, r10, %lo(~STATUS_PIE)
	wrctl	status, r10
	RESTORE_ALL
	eret

/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
	movi	r2, -ENOSYS
	br	translate_rc_and_ret

/* This implements the same as above, except it calls
 * do_syscall_trace_enter and do_syscall_trace_exit before and after the
 * syscall in order for utilities like strace and gdb to work.
 */
traced_system_call:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_enter
	RESTORE_SWITCH_STACK

	/* Create system call register arguments. The 5th and 6th
	   arguments on stack are already in place at the beginning
	   of pt_regs. */
	ldw	r2, PT_R2(sp)
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)

	/* Fetch the syscall function; we don't need to check the
	 * boundaries since this has already been done.
	 */
	slli	r1, r2, 2
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)

	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code.
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra, to avoid destroying registers.
	 */
translate_rc_and_ret2:
	movi	r1, 0
	bge	r2, zero, 4f
	sub	r2, zero, r2
	movi	r1, 1
4:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret2:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_exit
	RESTORE_SWITCH_STACK
	br	ret_from_exception

Luser_return:
	GET_THREAD_INFO	r11		/* get thread_info pointer */
	ldw	r10, TI_FLAGS(r11)	/* get thread_info->flags */
	ANDI32	r11, r10, _TIF_WORK_MASK
	beq	r11, r0, restore_all	/* Nothing to do */
	BTBZ	r1, r10, TIF_NEED_RESCHED, Lsignal_return

	/* Reschedule work */
	call	schedule
	br	ret_from_exception

Lsignal_return:
	ANDI32	r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beq	r1, r0, restore_all
	mov	r4, sp			/* pt_regs */
	SAVE_SWITCH_STACK
	call	do_notify_resume
	beq	r2, r0, no_work_pending
	RESTORE_SWITCH_STACK
	/* prepare restart syscall here without leaving kernel */
	ldw	r2, PT_R2(sp)	/* reload syscall number in r2 */
	ldw	r4, PT_R4(sp)	/* reload syscall arguments r4-r9 */
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)
	ldw	r8, PT_R8(sp)
	ldw	r9, PT_R9(sp)
	br	local_restart	/* restart syscall */

no_work_pending:
	RESTORE_SWITCH_STACK
	br	ret_from_exception

/***********************************************************************
 * Handle external interrupts.
 ***********************************************************************
 */
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
external_interrupt:
	rdctl	r12, ipending
	rdctl	r9, ienable
	and	r12, r12, r9
	/* skip if no interrupt is pending */
	beq	r12, r0, ret_from_interrupt

	movi	r24, -1
	stw	r24, PT_ORIG_R2(sp)

	/*
	 * Process an external hardware interrupt.
	 */

	addi	ea, ea, -4	/* re-issue the interrupted instruction */
	stw	ea, PT_EA(sp)
2:	movi	r4, %lo(-1)	/* Start from bit position 0,
					highest priority */
				/* This is the IRQ # for handler call */
1:	andi	r10, r12, 1	/* Isolate bit we are interested in */
	srli	r12, r12, 1	/* shift count is costly without hardware
					multiplier */
	addi	r4, r4, 1
	beq	r10, r0, 1b
	mov	r5, sp		/* Setup pt_regs pointer for handler call */
	call	do_IRQ
	rdctl	r12, ipending	/* check again if irq still pending */
	rdctl	r9, ienable	/* Isolate possible interrupts */
	and	r12, r12, r9
	bne	r12, r0, 2b
	/* br	ret_from_interrupt */ /* fall through to ret_from_interrupt */

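/*
 * Illustrative sketch (not part of the kernel build): the bit-scan loop
 * above in C. Each pass services the lowest pending-and-enabled interrupt,
 * then re-reads ipending until the mask drains. do_IRQ takes the IRQ
 * number and the pt_regs pointer, as in the register setup above; the
 * ctl_* accessors are hypothetical stand-ins for rdctl:
 *
 *	extern void do_IRQ(int irq, void *regs);
 *
 *	static void service_pending(void *regs)
 *	{
 *		unsigned int pending = ctl_ipending() & ctl_ienable();
 *
 *		while (pending) {
 *			int irq = -1;
 *			unsigned int bit;
 *
 *			do {	// find the lowest set bit, one shift per step
 *				bit = pending & 1;
 *				pending >>= 1;
 *				irq++;
 *			} while (!bit);
 *			do_IRQ(irq, regs);
 *			pending = ctl_ipending() & ctl_ienable();
 *		}
 *	}
 */
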
ENTRY(ret_from_interrupt)
	ldw	r1, PT_ESTATUS(sp)	/* check if returning to kernel */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

#ifdef CONFIG_PREEMPTION
	GET_THREAD_INFO	r1
	ldw	r4, TI_PREEMPT_COUNT(r1)
	bne	r4, r0, restore_all
	ldw	r4, TI_FLAGS(r1)	/* ? Need resched set */
	BTBZ	r10, r4, TIF_NEED_RESCHED, restore_all
	ldw	r4, PT_ESTATUS(sp)	/* ? Interrupts off */
	andi	r10, r4, ESTATUS_EPIE
	beq	r10, r0, restore_all
	call	preempt_schedule_irq
#endif
	br	restore_all

/***********************************************************************
 * A few syscall wrappers
 ***********************************************************************
 */
/*
 * int clone(unsigned long clone_flags, unsigned long newsp,
 *		int __user * parent_tidptr, int __user * child_tidptr,
 *		int tls_val)
 */
ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	subi	sp, sp, 4	/* make space for tls pointer */
	stw	r8, 0(sp)	/* pass tls pointer (r8) via stack (5th argument) */
	call	nios2_clone
	addi	sp, sp, 4
	RESTORE_SWITCH_STACK
	ret

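/*
 * Illustrative sketch (not part of the kernel build): in the Nios II C
 * calling convention the first four arguments travel in r4-r7 and the
 * fifth goes on the stack, so the wrapper above only needs to spill the
 * tls value (delivered by the syscall ABI in r8) before calling the C
 * helper. The assumed shape of that helper is:
 *
 *	long nios2_clone(unsigned long clone_flags, unsigned long newsp,
 *			 int *parent_tidptr, int *child_tidptr,
 *			 unsigned long tls);	// tls read from the stack slot
 */
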
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	mov	r4, sp
	call	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	/* skip the translate_rc_and_ret sequence on return */
	addi	ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
	ret

/***********************************************************************
 * A few other wrappers and stubs
 ***********************************************************************
 */
protection_exception_pte:
	rdctl	r6, pteaddr
	slli	r6, r6, 10	/* shift the VPN field up to form the fault address */
	call	do_page_fault
	br	ret_from_exception

protection_exception_ba:
	rdctl	r6, badaddr
	call	do_page_fault
	br	ret_from_exception

protection_exception_instr:
	call	handle_supervisor_instr
	br	ret_from_exception

handle_breakpoint:
	call	breakpoint_c
	br	ret_from_exception

#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
handle_unaligned:
	SAVE_SWITCH_STACK
	call	handle_unaligned_c
	RESTORE_SWITCH_STACK
	br	ret_from_exception
#else
handle_unaligned:
	call	handle_unaligned_c
	br	ret_from_exception
#endif

handle_illegal:
	call	handle_illegal_c
	br	ret_from_exception

handle_diverror:
	call	handle_diverror_c
	br	ret_from_exception

#ifdef CONFIG_KGDB
handle_kgdb_breakpoint:
	call	kgdb_breakpoint_c
	br	ret_from_exception
#endif

handle_trap_1:
	call	handle_trap_1_c
	br	ret_from_exception

handle_trap_2:
	call	handle_trap_2_c
	br	ret_from_exception

handle_trap_3:
handle_trap_reserved:
	call	handle_trap_3_c
	br	ret_from_exception

/*
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 */
ENTRY(resume)

	rdctl	r7, status			/* save thread status reg */
	stw	r7, TASK_THREAD + THREAD_KPSR(r4)

	andi	r7, r7, %lo(~STATUS_PIE)	/* disable interrupts */
	wrctl	status, r7

	SAVE_SWITCH_STACK
	stw	sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
	ldw	sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
	movia	r24, _current_thread		/* save thread */
	GET_THREAD_INFO r1
	stw	r1, 0(r24)
	RESTORE_SWITCH_STACK

	ldw	r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
	wrctl	status, r7
	ret

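/*
 * Illustrative sketch (not part of the kernel build): the contract resume
 * implements, in C-like terms. The push/pop pair stands for what
 * SAVE_SWITCH_STACK/RESTORE_SWITCH_STACK do around the stack swap, and
 * the read_/write_ accessors are hypothetical:
 *
 *	struct thread { unsigned long kpsr, ksp; };	// simplified view
 *
 *	void resume(struct thread *prev, struct thread *next)
 *	{
 *		prev->kpsr = read_status();
 *		write_status(prev->kpsr & ~STATUS_PIE);	// switch with irqs off
 *		push_callee_saved();			// SAVE_SWITCH_STACK
 *		prev->ksp = read_sp();
 *		write_sp(next->ksp);	// from here we run on next's stack
 *		current_thread = thread_info_of(read_sp());
 *		pop_callee_saved();			// RESTORE_SWITCH_STACK
 *		write_status(next->kpsr);
 *	}
 */
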
ENTRY(ret_from_fork)
	call	schedule_tail
	br	ret_from_exception

ENTRY(ret_from_kernel_thread)
	call	schedule_tail
	mov	r4, r17	/* arg */
	callr	r16	/* function */
	br	ret_from_exception

/*
 * Kernel user helpers.
 *
 * Each segment is 64-byte aligned and will be mapped to the <User space>.
 * New segments (if ever needed) must be added after the existing ones.
 * This mechanism should be used only for things that are really small and
 * justified, and not be abused freely.
 *
 */

/* Filling pads with undefined instructions. */
.macro	kuser_pad sym size
	.if	((. - \sym) & 3)
	.rept	(4 - (. - \sym) & 3)
	.byte	0
	.endr
	.endif
	.rept	((\size - (. - \sym)) / 4)
	.word	0xdeadbeef
	.endr
.endm

	.align	6
	.globl	__kuser_helper_start
__kuser_helper_start:

__kuser_helper_version:			/* @ 0x1000 */
	.word	((__kuser_helper_end - __kuser_helper_start) >> 6)

__kuser_cmpxchg:			/* @ 0x1004 */
	/*
	 * r4 pointer to exchange variable
	 * r5 old value
	 * r6 new value
	 */
cmpxchg_ldw:
	ldw	r2, 0(r4)		/* load current value */
	sub	r2, r2, r5		/* compare with old value */
	bne	r2, zero, cmpxchg_ret

	/* We had a match, store the new value */
cmpxchg_stw:
	stw	r6, 0(r4)
cmpxchg_ret:
	ret

	kuser_pad __kuser_cmpxchg, 64
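/*
 * Illustrative sketch (not part of the kernel build): how user space
 * might call this helper through its fixed mapping. The entry point lands
 * at KUSER_BASE + 4 (the "@ 0x1004" above); the helper returns the current
 * value minus the expected one, i.e. 0 when the swap happened:
 *
 *	typedef int (*kuser_cmpxchg_t)(int *ptr, int oldval, int newval);
 *	#define KUSER_CMPXCHG ((kuser_cmpxchg_t)0x1004)
 *
 *	static int atomic_add_one(int *counter)
 *	{
 *		int old;
 *		do {	// retry until the compare-and-swap succeeds
 *			old = *counter;
 *		} while (KUSER_CMPXCHG(counter, old, old + 1) != 0);
 *		return old + 1;
 *	}
 */
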
	.globl	__kuser_sigtramp
__kuser_sigtramp:
	movi	r2, __NR_rt_sigreturn
	trap

	kuser_pad __kuser_sigtramp, 64

	.globl	__kuser_helper_end
__kuser_helper_end: