2013-01-18 16:42:18 +07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* Amit Bhor, Kanika Nema: Codito Technologies 2004
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/sched.h>
|
2017-02-09 00:51:36 +07:00
|
|
|
#include <linux/sched/task.h>
|
2017-02-09 00:51:37 +07:00
|
|
|
#include <linux/sched/task_stack.h>
|
2017-02-09 00:51:36 +07:00
|
|
|
|
2013-01-18 16:42:18 +07:00
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/unistd.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/elf.h>
|
|
|
|
#include <linux/tick.h>
|
|
|
|
|
|
|
|
/*
 * Set the calling task's userland TLS data pointer.
 *
 * The value is stashed in thread_info and handed back via sys_arc_gettls;
 * copy_thread() propagates it to children on fork/clone.
 */
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->thr_ptr = (unsigned int)user_tls_data_ptr;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * We return the user space TLS data ptr as sys-call return code.
 * Ideally it should be copied to user.
 * However we can cheat by the fact that some sys-calls do return
 * absurdly high values.
 * Since the TLS data ptr is not going to be in range of 0xFFFF_xxxx
 * it won't be considered a sys-call error
 * and it will be loads better than copy-to-user, which is a definite
 * D-TLB Miss.
 */
SYSCALL_DEFINE0(arc_gettls)
{
	/* value previously set by sys_arc_settls (or inherited in copy_thread) */
	return task_thread_info(current)->thr_ptr;
}
|
2013-01-18 16:42:18 +07:00
|
|
|
|
2016-10-20 21:39:45 +07:00
|
|
|
/*
 * Emulated compare-and-exchange on a user-space word, for cores that lack
 * the LLOCK/SCOND atomic instructions.
 *
 * Returns the value previously read from @uaddr (or -EFAULT on a bad user
 * pointer).  Success vs failure of the exchange itself is reported to
 * userspace via the Z flag in status32, not via the return value.
 */
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
	struct pt_regs *regs = current_pt_regs();
	int uval = -EFAULT;	/* returned as-is if the user access faults */

	/*
	 * This is only for old cores lacking LLOCK/SCOND, which by definition
	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
	 * And this also helps reduce the overhead for serializing in
	 * the UP case
	 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

	/* Z indicates to userspace if operation succeeded: start pessimistic */
	regs->status32 &= ~STATUS_Z_MASK;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	/*
	 * Disable preemption so the load/compare/store sequence below appears
	 * atomic with respect to other tasks on this (UP) CPU.
	 */
	preempt_disable();

	if (__get_user(uval, uaddr))
		goto done;

	if (uval == expected) {
		/* set Z only if the store actually went through */
		if (!__put_user(new, uaddr))
			regs->status32 |= STATUS_Z_MASK;
	}

done:
	preempt_enable();

	return uval;
}
|
|
|
|
|
2017-06-03 01:49:10 +07:00
|
|
|
#ifdef CONFIG_ISA_ARCV2

/* ARCv2 idle: SLEEP with an interrupt-enable argument */
void arch_cpu_idle(void)
{
	/* Re-enable interrupts <= default irq priority before committing SLEEP */
	const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;

	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(arg)); /* can't be "r" has to be embedded const */
}
|
|
|
|
|
2017-05-28 13:52:06 +07:00
|
|
|
#elif defined(CONFIG_EZNPS_MTM_EXT) /* ARC700 variant in NPS */
|
|
|
|
|
|
|
|
void arch_cpu_idle(void)
|
|
|
|
{
|
|
|
|
/* only the calling HW thread needs to sleep */
|
|
|
|
__asm__ __volatile__(
|
|
|
|
".word %0 \n"
|
|
|
|
:
|
|
|
|
:"i"(CTOP_INST_HWSCHD_WFT_IE12));
|
|
|
|
}
|
|
|
|
|
|
|
|
#else	/* ARC700 */

/* ARC700 idle: plain SLEEP with interrupt enables encoded in the operand */
void arch_cpu_idle(void)
{
	/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
	__asm__ __volatile__("sleep 0x3	\n");
}
|
|
|
|
|
2017-06-03 01:49:10 +07:00
|
|
|
#endif
|
|
|
|
|
2013-01-18 16:42:18 +07:00
|
|
|
asmlinkage void ret_from_fork(void);
|
|
|
|
|
2015-03-14 01:04:18 +07:00
|
|
|
/*
 * Copy architecture-specific thread state
 *
 * Layout of Child kernel mode stack as setup at the end of this function is
 *
 * |     ...        |
 * |     ...        |
 * |    unused      |
 * |                |
 * ------------------
 * |     r25        |   <==== top of Stack (thread.ksp)
 * ~                ~
 * |    --to--      |   (CALLEE Regs of kernel mode)
 * |     r13        |
 * ------------------
 * |     fp         |
 * |    blink       |   @ret_from_fork
 * ------------------
 * |                |
 * ~                ~
 * ~                ~
 * |                |
 * ------------------
 * |     r12        |
 * ~                ~
 * |    --to--      |   (scratch Regs of user mode)
 * |     r0         |
 * ------------------
 * |      SP        |
 * |    orig_r0     |
 * |    event/ECR   |
 * |    user_r25    |
 * ------------------  <===== END of PAGE
 */
int copy_thread(unsigned long clone_flags,
		unsigned long usp, unsigned long kthread_arg,
		struct task_struct *p)
{
	struct pt_regs *c_regs;        /* child's pt_regs */
	unsigned long *childksp;       /* to unwind out of __switch_to() */
	struct callee_regs *c_callee;  /* child's callee regs */
	struct callee_regs *parent_callee;     /* parent's callee */
	struct pt_regs *regs = current_pt_regs();

	/* Mark the specific anchors to begin with (see pic above) */
	c_regs = task_pt_regs(p);
	childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
	c_callee = ((struct callee_regs *)childksp) - 1;

	/*
	 * __switch_to() uses thread.ksp to start unwinding stack
	 * For kernel threads we don't need to create callee regs, the
	 * stack layout nevertheless needs to remain the same.
	 * Also, since __switch_to anyways unwinds callee regs, we use
	 * this to populate kernel thread entry-pt/args into callee regs,
	 * so that ret_from_kernel_thread() becomes simpler.
	 */
	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */

	/* __switch_to expects FP(0), BLINK(return addr) at top */
	childksp[0] = 0;			/* fp */
	childksp[1] = (unsigned long)ret_from_fork; /* blink */

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: no user context to copy, just entry-pt/arg */
		memset(c_regs, 0, sizeof(struct pt_regs));

		c_callee->r13 = kthread_arg;
		c_callee->r14 = usp;	/* function */

		return 0;
	}

	/*--------- User Task Only --------------*/

	/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
	childksp[0] = 0;			/* for POP fp */
	childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */

	/* Copy parents pt regs on child's kernel mode stack */
	*c_regs = *regs;

	/* usp == 0 means "inherit parent's user SP" (fork); else clone stack */
	if (usp)
		c_regs->sp = usp;

	c_regs->r0 = 0;		/* fork returns 0 in child */

	/* parent's callee regs live just below its pt_regs on the stack */
	parent_callee = ((struct callee_regs *)regs) - 1;
	*c_callee = *parent_callee;

	if (unlikely(clone_flags & CLONE_SETTLS)) {
		/*
		 * set task's userland tls data ptr from 4th arg
		 * clone C-lib call is difft from clone sys-call
		 */
		task_thread_info(p)->thr_ptr = regs->r3;
	} else {
		/* Normal fork case: set parent's TLS ptr in child */
		task_thread_info(p)->thr_ptr =
		task_thread_info(current)->thr_ptr;
	}

	return 0;
}
|
|
|
|
|
2014-04-18 13:49:59 +07:00
|
|
|
/*
 * Do necessary setup to start up a new user task: point the saved user
 * context (pt_regs) at the new program's entry point and initial stack,
 * so the return-to-user path launches it.
 */
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
{
	regs->sp = usp;
	regs->ret = pc;	/* "ret" is the resume-PC on return to userspace */

	/*
	 * [U]ser Mode bit set
	 * [L] ZOL loop inhibited to begin with - cleared by a LP insn
	 * Interrupts enabled
	 */
	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

	/* bogus seed values for debugging */
	regs->lp_start = 0x10;
	regs->lp_end = 0x80;
}
|
|
|
|
|
2013-01-18 16:42:18 +07:00
|
|
|
/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void)
{
	/* Nothing to do on ARC: no per-thread debug/FPU state kept here */
}
|
|
|
|
|
|
|
|
/*
 * Coredump hook for FPU registers; returning 0 indicates there is no
 * FPU state to include in the dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Validate that an ELF binary was built for this kernel's ISA and OS ABI.
 * Returns 1 if loadable, 0 otherwise (loader then refuses the binary).
 */
int elf_check_arch(const struct elf32_hdr *x)
{
	unsigned int eflags;

	/* wrong machine type: built for the other ARC ISA family */
	if (x->e_machine != EM_ARC_INUSE) {
		pr_err("ELF not built for %s ISA\n",
			is_isa_arcompact() ? "ARCompact":"ARCv2");
		return 0;
	}

	eflags = x->e_flags;
	if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
		pr_err("ABI mismatch - you need newer toolchain\n");
		/* also kill the task outright rather than just failing exec */
		force_sigsegv(SIGSEGV, current);
		return 0;
	}

	return 1;
}
|
|
|
|
EXPORT_SYMBOL(elf_check_arch);
|