/*
 * Save/restore floating point context for signal handlers.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support)
 *
 * FIXME! These routines have not been tested for big endian case.
 */
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <cpu/fpu.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>

/* The PR (precision) bit in the FP Status Register must be clear when
 * an frchg instruction is executed, otherwise the instruction is undefined.
 * Executing frchg with PR set causes a trap on some SH4 implementations.
 */

#define FPSCR_RCHG 0x00000000
extern unsigned long long float64_div(unsigned long long a,
				      unsigned long long b);
extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
extern unsigned long long float64_mul(unsigned long long a,
				      unsigned long long b);
extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
extern unsigned long long float64_add(unsigned long long a,
				      unsigned long long b);
extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
extern unsigned long long float64_sub(unsigned long long a,
				      unsigned long long b);
extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
extern unsigned long int float64_to_float32(unsigned long long a);
static unsigned int fpu_exception_flags;

/*
 * Save FPU registers onto task structure.
 */
void save_fpu(struct task_struct *tsk)
{
	unsigned long dummy;

	enable_fpu();
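	/*
	 * fpul and fpscr are saved first, then FPSCR is set to FPSCR_RCHG
	 * (PR clear) so that frchg is legal.  Both register banks are then
	 * pushed with pre-decrement stores working back from
	 * &tsk->thread.xstate->hardfpu.status, and FPSCR_INIT is left in
	 * FPSCR on exit.
	 */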
asm volatile ("sts.l fpul, @-%0\n\t"
|
|
|
|
"sts.l fpscr, @-%0\n\t"
|
|
|
|
"lds %2, fpscr\n\t"
|
|
|
|
"frchg\n\t"
|
|
|
|
"fmov.s fr15, @-%0\n\t"
|
|
|
|
"fmov.s fr14, @-%0\n\t"
|
|
|
|
"fmov.s fr13, @-%0\n\t"
|
|
|
|
"fmov.s fr12, @-%0\n\t"
|
|
|
|
"fmov.s fr11, @-%0\n\t"
|
|
|
|
"fmov.s fr10, @-%0\n\t"
|
|
|
|
"fmov.s fr9, @-%0\n\t"
|
|
|
|
"fmov.s fr8, @-%0\n\t"
|
|
|
|
"fmov.s fr7, @-%0\n\t"
|
|
|
|
"fmov.s fr6, @-%0\n\t"
|
|
|
|
"fmov.s fr5, @-%0\n\t"
|
|
|
|
"fmov.s fr4, @-%0\n\t"
|
|
|
|
"fmov.s fr3, @-%0\n\t"
|
|
|
|
"fmov.s fr2, @-%0\n\t"
|
|
|
|
"fmov.s fr1, @-%0\n\t"
|
|
|
|
"fmov.s fr0, @-%0\n\t"
|
|
|
|
"frchg\n\t"
|
|
|
|
"fmov.s fr15, @-%0\n\t"
|
|
|
|
"fmov.s fr14, @-%0\n\t"
|
|
|
|
"fmov.s fr13, @-%0\n\t"
|
|
|
|
"fmov.s fr12, @-%0\n\t"
|
|
|
|
"fmov.s fr11, @-%0\n\t"
|
|
|
|
"fmov.s fr10, @-%0\n\t"
|
|
|
|
"fmov.s fr9, @-%0\n\t"
|
|
|
|
"fmov.s fr8, @-%0\n\t"
|
|
|
|
"fmov.s fr7, @-%0\n\t"
|
|
|
|
"fmov.s fr6, @-%0\n\t"
|
|
|
|
"fmov.s fr5, @-%0\n\t"
|
|
|
|
"fmov.s fr4, @-%0\n\t"
|
|
|
|
"fmov.s fr3, @-%0\n\t"
|
|
|
|
"fmov.s fr2, @-%0\n\t"
|
|
|
|
"fmov.s fr1, @-%0\n\t"
|
|
|
|
"fmov.s fr0, @-%0\n\t"
|
|
|
|
"lds %3, fpscr\n\t":"=r" (dummy)
|
2010-01-13 10:51:40 +07:00
|
|
|
:"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
|
2007-11-30 16:42:27 +07:00
|
|
|
"r"(FPSCR_RCHG), "r"(FPSCR_INIT)
|
|
|
|
:"memory");
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-11-26 18:38:36 +07:00
|
|
|
disable_fpu();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
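/*
 * Restore FPU registers from the task structure.  This mirrors save_fpu():
 * FPSCR.PR is cleared (FPSCR_RCHG) so that frchg is legal, both register
 * banks are reloaded with post-increment loads starting at the beginning of
 * tsk->thread.xstate, and finally fpscr and fpul are restored.
 */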
void restore_fpu(struct task_struct *tsk)
{
	unsigned long dummy;

	enable_fpu();
asm volatile ("lds %2, fpscr\n\t"
|
|
|
|
"fmov.s @%0+, fr0\n\t"
|
|
|
|
"fmov.s @%0+, fr1\n\t"
|
|
|
|
"fmov.s @%0+, fr2\n\t"
|
|
|
|
"fmov.s @%0+, fr3\n\t"
|
|
|
|
"fmov.s @%0+, fr4\n\t"
|
|
|
|
"fmov.s @%0+, fr5\n\t"
|
|
|
|
"fmov.s @%0+, fr6\n\t"
|
|
|
|
"fmov.s @%0+, fr7\n\t"
|
|
|
|
"fmov.s @%0+, fr8\n\t"
|
|
|
|
"fmov.s @%0+, fr9\n\t"
|
|
|
|
"fmov.s @%0+, fr10\n\t"
|
|
|
|
"fmov.s @%0+, fr11\n\t"
|
|
|
|
"fmov.s @%0+, fr12\n\t"
|
|
|
|
"fmov.s @%0+, fr13\n\t"
|
|
|
|
"fmov.s @%0+, fr14\n\t"
|
|
|
|
"fmov.s @%0+, fr15\n\t"
|
|
|
|
"frchg\n\t"
|
|
|
|
"fmov.s @%0+, fr0\n\t"
|
|
|
|
"fmov.s @%0+, fr1\n\t"
|
|
|
|
"fmov.s @%0+, fr2\n\t"
|
|
|
|
"fmov.s @%0+, fr3\n\t"
|
|
|
|
"fmov.s @%0+, fr4\n\t"
|
|
|
|
"fmov.s @%0+, fr5\n\t"
|
|
|
|
"fmov.s @%0+, fr6\n\t"
|
|
|
|
"fmov.s @%0+, fr7\n\t"
|
|
|
|
"fmov.s @%0+, fr8\n\t"
|
|
|
|
"fmov.s @%0+, fr9\n\t"
|
|
|
|
"fmov.s @%0+, fr10\n\t"
|
|
|
|
"fmov.s @%0+, fr11\n\t"
|
|
|
|
"fmov.s @%0+, fr12\n\t"
|
|
|
|
"fmov.s @%0+, fr13\n\t"
|
|
|
|
"fmov.s @%0+, fr14\n\t"
|
|
|
|
"fmov.s @%0+, fr15\n\t"
|
|
|
|
"frchg\n\t"
|
|
|
|
"lds.l @%0+, fpscr\n\t"
|
|
|
|
"lds.l @%0+, fpul\n\t"
|
|
|
|
:"=r" (dummy)
|
2010-01-13 10:51:40 +07:00
|
|
|
:"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
|
2007-11-30 16:42:27 +07:00
|
|
|
:"memory");
|
2005-04-17 05:20:36 +07:00
|
|
|
disable_fpu();
|
|
|
|
}

/**
 * denormal_to_double - Given a denormalized single-precision float,
 *			store the equivalent double-precision float
 *
 * @fpu: Pointer to sh_fpu_hard structure
 * @n: Index of the destination FP register pair
 */
static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
{
	unsigned long du, dl;
	unsigned long x = fpu->fpul;
	int exp = 1023 - 126;

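	/*
	 * Only a non-zero value with a zero exponent field (a denormal)
	 * needs conversion.  Normalize the 23-bit fraction: each left shift
	 * costs one in the double's biased exponent, which starts at
	 * 1023 - 126.  The fraction is then split across the high 20 and
	 * low 32 mantissa bits of the double.
	 */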
	if (x != 0 && (x & 0x7f800000) == 0) {
		du = (x & 0x80000000);
		while ((x & 0x00800000) == 0) {
			x <<= 1;
			exp--;
		}
		x &= 0x007fffff;
		du |= (exp << 20) | (x >> 3);
		dl = x << 29;

		fpu->fp_regs[n] = du;
		fpu->fp_regs[n + 1] = dl;
	}
}

/**
 * ieee_fpe_handler - Handle denormalized number exception
 *
 * @regs: Pointer to register structure
 *
 * Returns 1 when it's handled (should not cause exception).
 */
static int ieee_fpe_handler(struct pt_regs *regs)
{
	unsigned short insn = *(unsigned short *)regs->pc;
	unsigned short finsn;
	unsigned long nextpc;
	int nib[4] = {
		(insn >> 12) & 0xf,
		(insn >> 8) & 0xf,
		(insn >> 4) & 0xf,
		insn & 0xf
	};

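	/*
	 * If the instruction at regs->pc is a branch, the FP instruction
	 * that actually faulted sits in its delay slot at regs->pc + 2;
	 * decode the branch to find both the faulting instruction (finsn)
	 * and the address at which execution should resume (nextpc).
	 */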
	if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
		regs->pr = regs->pc + 4;  /* bsr & jsr */

	if (nib[0] == 0xa || nib[0] == 0xb) {
		/* bra & bsr */
		nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
		finsn = *(unsigned short *)(regs->pc + 2);
	} else if (nib[0] == 0x8 && nib[1] == 0xd) {
		/* bt/s */
		if (regs->sr & 1)
			nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
		else
			nextpc = regs->pc + 4;
		finsn = *(unsigned short *)(regs->pc + 2);
	} else if (nib[0] == 0x8 && nib[1] == 0xf) {
		/* bf/s */
		if (regs->sr & 1)
			nextpc = regs->pc + 4;
		else
			nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
		finsn = *(unsigned short *)(regs->pc + 2);
	} else if (nib[0] == 0x4 && nib[3] == 0xb &&
		   (nib[2] == 0x0 || nib[2] == 0x2)) {
		/* jmp & jsr */
		nextpc = regs->regs[nib[1]];
		finsn = *(unsigned short *)(regs->pc + 2);
	} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
		   (nib[2] == 0x0 || nib[2] == 0x2)) {
		/* braf & bsrf */
		nextpc = regs->pc + 4 + regs->regs[nib[1]];
		finsn = *(unsigned short *)(regs->pc + 2);
	} else if (insn == 0x000b) {
		/* rts */
		nextpc = regs->pr;
		finsn = *(unsigned short *)(regs->pc + 2);
	} else {
		nextpc = regs->pc + instruction_size(insn);
		finsn = insn;
	}

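	/*
	 * Emulate the instruction that raised the exception when one of its
	 * operands is a denormal: fcnvsd, fmul, fadd/fsub, fdiv and fcnvds
	 * are handled in software and the result is written straight back
	 * into the saved FPU context.
	 */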
if ((finsn & 0xf1ff) == 0xf0ad) {
|
|
|
|
/* fcnvsd */
|
2005-04-17 05:20:36 +07:00
|
|
|
struct task_struct *tsk = current;
|
|
|
|
|
2010-01-13 10:51:40 +07:00
|
|
|
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
|
2005-04-17 05:20:36 +07:00
|
|
|
/* FPU error */
|
2010-01-13 10:51:40 +07:00
|
|
|
denormal_to_double(&tsk->thread.xstate->hardfpu,
|
2007-11-30 16:42:27 +07:00
|
|
|
(finsn >> 8) & 0xf);
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
regs->pc = nextpc;
|
|
|
|
return 1;
|
|
|
|
} else if ((finsn & 0xf00f) == 0xf002) {
|
|
|
|
/* fmul */
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
int fpscr;
|
|
|
|
int n, m, prec;
|
|
|
|
unsigned int hx, hy;
|
|
|
|
|
|
|
|
n = (finsn >> 8) & 0xf;
|
|
|
|
m = (finsn >> 4) & 0xf;
|
2010-01-13 10:51:40 +07:00
|
|
|
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
|
|
|
|
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
|
|
|
|
fpscr = tsk->thread.xstate->hardfpu.fpscr;
|
2007-11-30 16:42:27 +07:00
|
|
|
prec = fpscr & FPSCR_DBL_PRECISION;
|
|
|
|
|
|
|
|
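		/*
		 * An operand is denormal when its sign-stripped high word
		 * has a zero exponent field: below 0x00100000 for doubles,
		 * below 0x00800000 for singles.
		 */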
if ((fpscr & FPSCR_CAUSE_ERROR)
|
|
|
|
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|
|
|
|
|| (hy & 0x7fffffff) < 0x00100000))) {
|
|
|
|
long long llx, lly;
|
|
|
|
|
|
|
|
/* FPU error because of denormal (doubles) */
|
|
|
|
llx = ((long long)hx << 32)
|
2010-01-13 10:51:40 +07:00
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
|
2007-11-30 16:42:27 +07:00
|
|
|
lly = ((long long)hy << 32)
|
2010-01-13 10:51:40 +07:00
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
|
2007-11-30 16:42:27 +07:00
|
|
|
llx = float64_mul(llx, lly);
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
|
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
|
2007-11-30 16:42:27 +07:00
|
|
|
} else if ((fpscr & FPSCR_CAUSE_ERROR)
|
|
|
|
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|
|
|
|
|| (hy & 0x7fffffff) < 0x00800000))) {
|
|
|
|
/* FPU error because of denormal (floats) */
|
|
|
|
hx = float32_mul(hx, hy);
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
|
2007-11-30 16:42:27 +07:00
|
|
|
} else
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
regs->pc = nextpc;
|
|
|
|
return 1;
|
|
|
|
} else if ((finsn & 0xf00e) == 0xf000) {
|
|
|
|
/* fadd, fsub */
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
int fpscr;
|
|
|
|
int n, m, prec;
|
|
|
|
unsigned int hx, hy;
|
|
|
|
|
|
|
|
n = (finsn >> 8) & 0xf;
|
|
|
|
m = (finsn >> 4) & 0xf;
|
2010-01-13 10:51:40 +07:00
|
|
|
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
|
|
|
|
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
|
|
|
|
fpscr = tsk->thread.xstate->hardfpu.fpscr;
|
2007-11-30 16:42:27 +07:00
|
|
|
prec = fpscr & FPSCR_DBL_PRECISION;
|
|
|
|
|
|
|
|
if ((fpscr & FPSCR_CAUSE_ERROR)
|
|
|
|
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|
|
|
|
|| (hy & 0x7fffffff) < 0x00100000))) {
|
|
|
|
long long llx, lly;
|
|
|
|
|
|
|
|
/* FPU error because of denormal (doubles) */
|
|
|
|
llx = ((long long)hx << 32)
|
2010-01-13 10:51:40 +07:00
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
|
2007-11-30 16:42:27 +07:00
|
|
|
lly = ((long long)hy << 32)
|
2010-01-13 10:51:40 +07:00
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
|
2007-11-30 16:42:27 +07:00
|
|
|
if ((finsn & 0xf00f) == 0xf000)
|
|
|
|
llx = float64_add(llx, lly);
|
|
|
|
else
|
|
|
|
llx = float64_sub(llx, lly);
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
|
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
|
2007-11-30 16:42:27 +07:00
|
|
|
} else if ((fpscr & FPSCR_CAUSE_ERROR)
|
|
|
|
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|
|
|
|
|| (hy & 0x7fffffff) < 0x00800000))) {
|
|
|
|
/* FPU error because of denormal (floats) */
|
|
|
|
if ((finsn & 0xf00f) == 0xf000)
|
|
|
|
hx = float32_add(hx, hy);
|
|
|
|
else
|
|
|
|
hx = float32_sub(hx, hy);
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
|
2007-11-30 16:42:27 +07:00
|
|
|
} else
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
regs->pc = nextpc;
|
|
|
|
return 1;
|
|
|
|
} else if ((finsn & 0xf003) == 0xf003) {
|
|
|
|
/* fdiv */
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
int fpscr;
|
|
|
|
int n, m, prec;
|
|
|
|
unsigned int hx, hy;
|
|
|
|
|
|
|
|
n = (finsn >> 8) & 0xf;
|
|
|
|
m = (finsn >> 4) & 0xf;
|
2010-01-13 10:51:40 +07:00
|
|
|
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
|
|
|
|
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
|
|
|
|
fpscr = tsk->thread.xstate->hardfpu.fpscr;
|
2007-11-30 16:42:27 +07:00
|
|
|
prec = fpscr & FPSCR_DBL_PRECISION;
|
|
|
|
|
|
|
|
if ((fpscr & FPSCR_CAUSE_ERROR)
|
|
|
|
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|
|
|
|
|| (hy & 0x7fffffff) < 0x00100000))) {
|
|
|
|
long long llx, lly;
|
|
|
|
|
|
|
|
/* FPU error because of denormal (doubles) */
|
|
|
|
llx = ((long long)hx << 32)
|
2010-01-13 10:51:40 +07:00
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
|
2007-11-30 16:42:27 +07:00
|
|
|
lly = ((long long)hy << 32)
|
2010-01-13 10:51:40 +07:00
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
|
2007-11-30 16:42:27 +07:00
|
|
|
|
|
|
|
llx = float64_div(llx, lly);
|
|
|
|
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
|
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
|
2007-11-30 16:42:27 +07:00
|
|
|
} else if ((fpscr & FPSCR_CAUSE_ERROR)
|
|
|
|
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|
|
|
|
|| (hy & 0x7fffffff) < 0x00800000))) {
|
|
|
|
/* FPU error because of denormal (floats) */
|
|
|
|
hx = float32_div(hx, hy);
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
|
2006-11-21 11:34:04 +07:00
|
|
|
} else
|
2007-11-30 16:42:27 +07:00
|
|
|
return 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-09-05 14:36:19 +07:00
|
|
|
regs->pc = nextpc;
|
|
|
|
return 1;
|
|
|
|
} else if ((finsn & 0xf0bd) == 0xf0bd) {
|
|
|
|
/* fcnvds - double to single precision convert */
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
int m;
|
|
|
|
unsigned int hx;
|
|
|
|
|
2008-10-15 22:48:16 +07:00
|
|
|
m = (finsn >> 8) & 0x7;
|
2010-01-13 10:51:40 +07:00
|
|
|
hx = tsk->thread.xstate->hardfpu.fp_regs[m];
|
2008-09-05 14:36:19 +07:00
|
|
|
|
2010-01-13 10:51:40 +07:00
|
|
|
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
|
2008-09-05 14:36:19 +07:00
|
|
|
&& ((hx & 0x7fffffff) < 0x00100000)) {
|
|
|
|
/* subnormal double to float conversion */
|
|
|
|
long long llx;
|
|
|
|
|
2010-01-13 10:51:40 +07:00
|
|
|
llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
|
|
|
|
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
|
2008-09-05 14:36:19 +07:00
|
|
|
|
2010-01-13 10:51:40 +07:00
|
|
|
tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
|
2008-09-05 14:36:19 +07:00
|
|
|
} else
|
|
|
|
return 0;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
regs->pc = nextpc;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-11-30 16:42:27 +07:00
|
|
|
void float_raise(unsigned int flags)
{
	fpu_exception_flags |= flags;
}

int float_rounding_mode(void)
{
	struct task_struct *tsk = current;
	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
	return roundingMode;
}

BUILD_TRAP_HANDLER(fpu_error)
{
	struct task_struct *tsk = current;
	TRAP_HANDLER_DECL;

__unlazy_fpu(tsk, regs);
	fpu_exception_flags = 0;
	if (ieee_fpe_handler(regs)) {
		tsk->thread.xstate->hardfpu.fpscr &=
		    ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
		tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
		/* Set the FPSCR flag as well as cause bits - simply
		 * replicate the cause */
		tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
		grab_fpu(regs);
		restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
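		/*
		 * Deliver SIGFPE only if one of the exceptions that were
		 * raised is actually enabled in FPSCR; otherwise the
		 * software emulation has fully handled the fault.
		 */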
if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
|
2007-11-30 16:42:27 +07:00
|
|
|
(fpu_exception_flags >> 2)) == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
force_sig(SIGFPE, tsk);
}