/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		local_irq_enable();
	} else {
		unsigned long pstate;

		local_irq_enable();

		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
		 * the cpu sleep hypervisor call.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
			sun4v_cpu_yield();

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif

#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned int)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}

	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif

static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rwk = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}

union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	this_cpu = raw_smp_processor_id();

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_cpu(cpu, mask) {
		struct global_reg_snapshot *gp;

		if (exclude_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};

static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	if (!pcr_ops)
		return;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);

#endif

unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long ret = 0xdeadbeefUL;

	if (ti && ti->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}

/* Free current thread data structures etc.. */
void exit_thread(struct task_struct *tsk)
{
	struct thread_info *t = task_thread_info(tsk);

	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}
}

void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}

/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

/* Flush the register windows cached by the CPU and try to push any
 * user windows still buffered in the thread_info out to the user
 * stack, discarding each buffered window that is written back
 * successfully.
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

static void stack_unaligned(unsigned long sp)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) sp;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

/* Write every user register window buffered in the thread_info out to
 * the user stack.  An unaligned stack pointer raises SIGBUS, and a
 * stack we cannot write to terminates the task with SIGILL.
 */
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize)))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	user_exit();
	do_exit(SIGILL);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == childs  pid, %o1 == 0
 * Child  -->  %o0 == parents pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}

typedef struct {
	union {
		unsigned int	pr_regs[32];
		unsigned long	pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int	pr_fsr;
	unsigned char	pr_qcnt;
	unsigned char	pr_q_entrysize;
	unsigned char	pr_en;
	unsigned int	pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if (fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}