/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 * 2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 * 2000-2002   x86-64 support by Andi Kleen
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/i387.h>
#include <asm/vdso.h>
#include <asm/mce.h>

#ifdef CONFIG_X86_64
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#endif /* CONFIG_X86_64 */

#include <asm/syscall.h>
#include <asm/syscalls.h>

#include <asm/sigframe.h>

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

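/*
 * Only the EFLAGS bits listed here are taken from the user-supplied
 * sigcontext when a handler returns (see restore_sigcontext() below);
 * everything else, e.g. IOPL, keeps the kernel's current value.
 */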
#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
			 X86_EFLAGS_CF)

#ifdef CONFIG_X86_32
# define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
#else
# define FIX_EFLAGS	__FIX_EFLAGS
#endif

#define COPY(x)			do {			\
	get_user_ex(regs->x, &sc->x);			\
} while (0)

#define GET_SEG(seg)		({			\
	unsigned short tmp;				\
	get_user_ex(tmp, &sc->seg);			\
	tmp;						\
})

#define COPY_SEG(seg)		do {			\
	regs->seg = GET_SEG(seg);			\
} while (0)

#define COPY_SEG_CPL3(seg)	do {			\
	regs->seg = GET_SEG(seg) | 3;			\
} while (0)

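/*
 * Restore the register state saved in the user-space sigcontext back
 * into pt_regs.  All user accesses are batched inside get_user_try/
 * get_user_catch, so a faulting access just sets err instead of
 * oopsing.  COPY_SEG_CPL3() forces RPL 3 on the reloaded selectors.
 */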
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
		   unsigned long *pax)
{
	void __user *buf;
	unsigned int tmpflags;
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	get_user_try {

#ifdef CONFIG_X86_32
		set_user_gs(regs, GET_SEG(gs));
		COPY_SEG(fs);
		COPY_SEG(es);
		COPY_SEG(ds);
#endif /* CONFIG_X86_32 */

		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
		COPY(dx); COPY(cx); COPY(ip);

#ifdef CONFIG_X86_64
		COPY(r8);
		COPY(r9);
		COPY(r10);
		COPY(r11);
		COPY(r12);
		COPY(r13);
		COPY(r14);
		COPY(r15);
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
		COPY_SEG_CPL3(cs);
		COPY_SEG_CPL3(ss);
#else /* !CONFIG_X86_32 */
		/* Kernel saves and restores only the CS segment register on signals,
		 * which is the bare minimum needed to allow mixed 32/64-bit code.
		 * App's signal handler can save/restore other segments if needed. */
		COPY_SEG_CPL3(cs);
#endif /* CONFIG_X86_32 */

		get_user_ex(tmpflags, &sc->flags);
		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
		regs->orig_ax = -1;		/* disable syscall checks */

		get_user_ex(buf, &sc->fpstate);
		err |= restore_i387_xstate(buf);

		get_user_ex(*pax, &sc->ax);
	} get_user_catch(err);

	return err;
}

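/*
 * Save the current register state into the user-space sigcontext that
 * lives in the signal frame; it is read back by restore_sigcontext()
 * when the handler returns through sigreturn.
 */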
static int
setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
		 struct pt_regs *regs, unsigned long mask)
{
	int err = 0;

	put_user_try {

#ifdef CONFIG_X86_32
		put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
		put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
		put_user_ex(regs->es, (unsigned int __user *)&sc->es);
		put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
#endif /* CONFIG_X86_32 */

		put_user_ex(regs->di, &sc->di);
		put_user_ex(regs->si, &sc->si);
		put_user_ex(regs->bp, &sc->bp);
		put_user_ex(regs->sp, &sc->sp);
		put_user_ex(regs->bx, &sc->bx);
		put_user_ex(regs->dx, &sc->dx);
		put_user_ex(regs->cx, &sc->cx);
		put_user_ex(regs->ax, &sc->ax);
#ifdef CONFIG_X86_64
		put_user_ex(regs->r8, &sc->r8);
		put_user_ex(regs->r9, &sc->r9);
		put_user_ex(regs->r10, &sc->r10);
		put_user_ex(regs->r11, &sc->r11);
		put_user_ex(regs->r12, &sc->r12);
		put_user_ex(regs->r13, &sc->r13);
		put_user_ex(regs->r14, &sc->r14);
		put_user_ex(regs->r15, &sc->r15);
#endif /* CONFIG_X86_64 */

		put_user_ex(current->thread.trap_no, &sc->trapno);
		put_user_ex(current->thread.error_code, &sc->err);
		put_user_ex(regs->ip, &sc->ip);
#ifdef CONFIG_X86_32
		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
		put_user_ex(regs->flags, &sc->flags);
		put_user_ex(regs->sp, &sc->sp_at_signal);
		put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
#else /* !CONFIG_X86_32 */
		put_user_ex(regs->flags, &sc->flags);
		put_user_ex(regs->cs, &sc->cs);
		put_user_ex(0, &sc->gs);
		put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */

		put_user_ex(fpstate, &sc->fpstate);

		/* non-iBCS2 extensions.. */
		put_user_ex(mask, &sc->oldmask);
		put_user_ex(current->thread.cr2, &sc->cr2);
	} put_user_catch(err);

	return err;
}

/*
 * Set up a signal frame.
 */

/*
 * Determine which stack to use..
 */
static unsigned long align_sigframe(unsigned long sp)
{
#ifdef CONFIG_X86_32
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -16ul) - 4;
#else /* !CONFIG_X86_32 */
	sp = round_down(sp, 16) - 8;
#endif
	return sp;
}

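/*
 * Pick the address of the signal frame.  Starting from the interrupted
 * regs->sp (or from the top of the alternate stack for SA_ONSTACK
 * handlers), the code below carves out, working downwards:
 *
 *	[ 128-byte red zone, 64-bit only	]
 *	[ FPU/extended state save area		]  <- *fpstate
 *	[ struct sigframe / struct rt_sigframe	]  <- returned pointer
 */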
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	unsigned long sp = regs->sp;
	int onsigstack = on_sig_stack(sp);

#ifdef CONFIG_X86_64
	/* redzone */
	sp -= 128;
#endif /* CONFIG_X86_64 */

	if (!onsigstack) {
		/* This is the X/Open sanctioned signal stack switching.  */
		if (ka->sa.sa_flags & SA_ONSTACK) {
			if (current->sas_ss_size)
				sp = current->sas_ss_sp + current->sas_ss_size;
		} else {
#ifdef CONFIG_X86_32
			/* This is the legacy signal stack switching. */
			if ((regs->ss & 0xffff) != __USER_DS &&
				!(ka->sa.sa_flags & SA_RESTORER) &&
				ka->sa.sa_restorer)
				sp = (unsigned long) ka->sa.sa_restorer;
#endif /* CONFIG_X86_32 */
		}
	}

	if (used_math()) {
		sp -= sig_xstate_size;
#ifdef CONFIG_X86_64
		sp = round_down(sp, 64);
#endif /* CONFIG_X86_64 */
		*fpstate = (void __user *)sp;
	}

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (onsigstack && !likely(on_sig_stack(sp)))
		return (void __user *)-1L;

	/* save i387 state */
	if (used_math() && save_i387_xstate(*fpstate) < 0)
		return (void __user *)-1L;

	return (void __user *)sp;
}

#ifdef CONFIG_X86_32
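/*
 * Historical return trampolines.  A copy is written to frame->retcode,
 * but the normal return path is the vdso sigreturn stub or the handler's
 * own sa_restorer; the on-stack copy is only used as a fallback when no
 * vdso is mapped, and the bytes are kept mainly because gdb recognizes
 * them when unwinding signal frames (see the comments below).
 */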
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};

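/*
 * Build the legacy (non-RT) 32-bit frame.  regs->sp is left pointing at
 * frame->pretcode, so from the handler's point of view this looks like
 * an ordinary cdecl call: a return address first, then the signal
 * number argument (see struct sigframe in <asm/sigframe.h>).
 */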
static int
__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_NSIG_WORDS > 1) {
		if (__copy_to_user(&frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (current->mm->context.vdso)
		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
	else
		restorer = &frame->retcode;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;

	/* Set up to return from userspace.  */
	err |= __put_user(restorer, &frame->pretcode);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ka->sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
}

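/*
 * RT flavour of the 32-bit frame: in addition to the sigcontext it
 * carries the siginfo and a full ucontext (altstack state and the
 * saved signal mask), with pointers to both placed on the stack and
 * in %edx/%ecx for the handler.
 */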
static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(&frame->info, &frame->pinfo);
		put_user_ex(&frame->uc, &frame->puc);
		err |= copy_siginfo_to_user(&frame->info, info);

		/* Create the ucontext.  */
		if (cpu_has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
		put_user_ex(sas_ss_flags(regs->sp),
			    &frame->uc.uc_stack.ss_flags);
		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
					regs, set->sig[0]);
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

		/* Set up to return from userspace.  */
		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
		if (ka->sa.sa_flags & SA_RESTORER)
			restorer = ka->sa.sa_restorer;
		put_user_ex(restorer, &frame->pretcode);

		/*
		 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
		 *
		 * WE DO NOT USE IT ANY MORE! It's only left here for historical
		 * reasons and because gdb uses it as a signature to notice
		 * signal handler stack frames.
		 */
		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ka->sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
}
#else /* !CONFIG_X86_32 */
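/*
 * 64-bit frame: the handler gets (sig, &frame->info, &frame->uc) in
 * %rdi/%rsi/%rdx per the x86-64 calling convention.  No trampoline is
 * written to the user stack, so a usable sa_restorer (normally supplied
 * by the C library) is required; without one the setup fails.
 */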
static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	int err = 0;
	struct task_struct *me = current;

	frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (ka->sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, info))
			return -EFAULT;
	}

	put_user_try {
		/* Create the ucontext.  */
		if (cpu_has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
		put_user_ex(sas_ss_flags(regs->sp),
			    &frame->uc.uc_stack.ss_flags);
		put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

		/* Set up to return from userspace.  If provided, use a stub
		   already in userspace.  */
		/* x86-64 should always use SA_RESTORER. */
		if (ka->sa.sa_flags & SA_RESTORER) {
			put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
		} else {
			/* could use a vstub here */
			err |= -EFAULT;
		}
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/* Set up the CS register to run signal handlers in 64-bit mode,
	   even if the handler happens to be interrupting 32-bit code. */
	regs->cs = __USER_CS;

	return 0;
}
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_32
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
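/*
 * The temporary mask is only in effect while we sleep: returning
 * -ERESTARTNOHAND forces the pending signal to be delivered, and
 * set_restore_sigmask() makes sure current->saved_sigmask is put back
 * once the handler (or the syscall exit path) has dealt with it.
 */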
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();

	return -ERESTARTNOHAND;
}

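/*
 * Legacy sigaction(): only the first word of the signal mask
 * (old_sigset_t) is transferred.  User accesses are batched with
 * get_user_try/put_user_try and checked once at the end.
 */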
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;

		get_user_try {
			get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
			get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
			get_user_ex(mask, &act->sa_mask);
			get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
		} get_user_catch(ret);

		if (ret)
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;

		put_user_try {
			put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
			put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
			put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
			put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
		} put_user_catch(ret);

		if (ret)
			return -EFAULT;
	}

	return ret;
}
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_32
int sys_sigaltstack(struct pt_regs *regs)
{
	const stack_t __user *uss = (const stack_t __user *)regs->bx;
	stack_t __user *uoss = (stack_t __user *)regs->cx;

	return do_sigaltstack(uss, uoss, regs->sp);
}
#else /* !CONFIG_X86_32 */
asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}
#endif /* CONFIG_X86_32 */

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_X86_32
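/*
 * By the time we get here the handler has returned through the
 * sigreturn trampoline: its 'ret' consumed frame->pretcode and the
 * trampoline's 'popl %eax' consumed the signal number, so the user
 * stack pointer is expected to sit 8 bytes above the start of the
 * original struct sigframe, hence regs->sp - 8 below.
 */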
unsigned long sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;
	unsigned long ax;
	sigset_t set;

	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc, &ax))
		goto badframe;
	return ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */

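/*
 * For the RT frame only the return address (frame->pretcode) has been
 * popped by the handler's 'ret', so the frame is expected to start
 * sizeof(long) bytes below the current user stack pointer.
 */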
long sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	unsigned long ax;
	sigset_t set;

	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
		goto badframe;

	return ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}

/*
 * OK, we're invoking a handler:
 */
static int signr_convert(int sig)
{
#ifdef CONFIG_X86_32
	struct thread_info *info = current_thread_info();

	if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32)
		return info->exec_domain->signal_invmap[sig];
#endif /* CONFIG_X86_32 */
	return sig;
}

#ifdef CONFIG_X86_32

#define is_ia32	1
#define ia32_setup_frame	__setup_frame
#define ia32_setup_rt_frame	__setup_rt_frame

#else /* !CONFIG_X86_32 */

#ifdef CONFIG_IA32_EMULATION
#define is_ia32	test_thread_flag(TIF_IA32)
#else /* !CONFIG_IA32_EMULATION */
#define is_ia32	0
#endif /* CONFIG_IA32_EMULATION */

int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
		sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		sigset_t *set, struct pt_regs *regs);

#endif /* CONFIG_X86_32 */

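/*
 * Choose the frame format: 32-bit tasks (native 32-bit kernel or IA32
 * emulation) get an ia32 frame, and only an RT one when the handler
 * asked for SA_SIGINFO; 64-bit tasks always get the RT frame.
 */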
static int
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	int usig = signr_convert(sig);
	int ret;

	/* Set up the stack frame */
	if (is_ia32) {
		if (ka->sa.sa_flags & SA_SIGINFO)
			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
		else
			ret = ia32_setup_frame(usig, ka, set, regs);
	} else
		ret = __setup_rt_frame(sig, ka, info, set, regs);

	if (ret) {
		force_sigsegv(sig, current);
		return -EFAULT;
	}

	return ret;
}

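/*
 * Deliver one signal: fix up the interrupted system call first
 * (rewinding regs->ip by two bytes re-executes the two-byte
 * int $0x80/syscall/sysenter instruction when the call should be
 * restarted), then build the user frame and block the handler's
 * signals until sigreturn.
 */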
static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
	      sigset_t *oldset, struct pt_regs *regs)
{
	int ret;

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
		/* fallthrough */
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
	 * flag so that register information in the sigcontext is correct.
	 */
	if (unlikely(regs->flags & X86_EFLAGS_TF) &&
	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
		regs->flags &= ~X86_EFLAGS_TF;

	ret = setup_rt_frame(sig, ka, info, oldset, regs);

	if (ret)
		return ret;

#ifdef CONFIG_X86_64
	/*
	 * This has nothing to do with segment registers,
	 * despite the name.  This magic affects uaccess.h
	 * macros' behavior.  Reset it to the normal setting.
	 */
	set_fs(USER_DS);
#endif

	/*
	 * Clear the direction flag as per the ABI for function entry.
	 */
	regs->flags &= ~X86_EFLAGS_DF;

	/*
	 * Clear TF when entering the signal handler, but
	 * notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	regs->flags &= ~X86_EFLAGS_TF;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	tracehook_signal_handler(sig, info, ka, regs,
				 test_thread_flag(TIF_SINGLESTEP));

	return 0;
}

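/*
 * Syscall number used when an -ERESTART_RESTARTBLOCK call is restarted;
 * a TIF_IA32 task has to be handed the 32-bit (compat) number.
 */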
#ifdef CONFIG_X86_32
#define NR_restart_syscall	__NR_restart_syscall
#else /* !CONFIG_X86_32 */
#define NR_restart_syscall	\
	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
#endif /* CONFIG_X86_32 */

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;
	sigset_t *oldset;

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 * X86_32: vm86 regs switched out by assembly code before reaching
	 * here, so testing against kernel CS suffices.
	 */
	if (!user_mode(regs))
		return;

	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/*
		 * Re-enable any watchpoints before delivering the
		 * signal to user space. The processor register will
		 * have been cleared if the watchpoint triggered
		 * inside the kernel.
		 */
		if (current->thread.debugreg7)
			set_debugreg(current->thread.debugreg7, 7);

		/* Whee! Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		}
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->ax = NR_restart_syscall;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
void
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
#ifdef CONFIG_X86_NEW_MCE
	/* notify userspace of pending MCEs */
	if (thread_info_flags & _TIF_MCE_NOTIFY)
		mce_notify_irq();
#endif /* CONFIG_X86_NEW_MCE */

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

#ifdef CONFIG_X86_32
	clear_thread_flag(TIF_IRET);
#endif /* CONFIG_X86_32 */
}

void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk("%s"
		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
		       me->comm, me->pid, where, frame,
		       regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(" in ", regs->ip);
		printk(KERN_CONT "\n");
	}

	force_sig(SIGSEGV, me);
}