2007-11-22 15:30:50 +07:00
|
|
|
#ifndef __ASM_SH_FPU_H
|
|
|
|
#define __ASM_SH_FPU_H
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
2008-03-26 17:02:47 +07:00
|
|
|
#include <linux/preempt.h>
|
2007-11-22 15:30:50 +07:00
|
|
|
#include <asm/ptrace.h>
|
|
|
|
|
|
|
|
#ifdef CONFIG_SH_FPU
|
|
|
|
static inline void release_fpu(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
regs->sr |= SR_FD;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void grab_fpu(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
regs->sr &= ~SR_FD;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct task_struct;
|
|
|
|
|
sh: Minor optimisations to FPU handling
A number of small optimisations to FPU handling, in particular:
- move the task USEDFPU flag from the thread_info flags field (which
is accessed asynchronously to the thread) to a new status field,
which is only accessed by the thread itself. This allows locking to
be removed in most cases, or can be reduced to a preempt_lock().
This mimics the i386 behaviour.
- move the modification of regs->sr and thread_info->status flags out
of save_fpu() to __unlazy_fpu(). This gives the compiler a better
chance to optimise things, as well as making save_fpu() symmetrical
with restore_fpu() and init_fpu().
- implement prepare_to_copy(), so that when creating a thread, we can
unlazy the FPU prior to copying the thread data structures.
Also make sure that the FPU is disabled while in the kernel, in
particular while booting, and for newly created kernel threads.
In a very artificial benchmark, the execution time for 2500000
context switches was reduced from 50 to 45 seconds.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2009-09-26 00:25:10 +07:00
|
|
|
extern void save_fpu(struct task_struct *__tsk);
|
2009-07-07 21:25:10 +07:00
|
|
|
void fpu_state_restore(struct pt_regs *regs);
|
2007-11-22 15:30:50 +07:00
|
|
|
#else
|
2008-03-26 17:09:21 +07:00
|
|
|
|
sh: Minor optimisations to FPU handling
A number of small optimisations to FPU handling, in particular:
- move the task USEDFPU flag from the thread_info flags field (which
is accessed asynchronously to the thread) to a new status field,
which is only accessed by the thread itself. This allows locking to
be removed in most cases, or can be reduced to a preempt_lock().
This mimics the i386 behaviour.
- move the modification of regs->sr and thread_info->status flags out
of save_fpu() to __unlazy_fpu(). This gives the compiler a better
chance to optimise things, as well as making save_fpu() symmetrical
with restore_fpu() and init_fpu().
- implement prepare_to_copy(), so that when creating a thread, we can
unlazy the FPU prior to copying the thread data structures.
Also make sure that the FPU is disabled while in the kernel, in
particular while booting, and for newly created kernel threads.
In a very artificial benchmark, the execution time for 2500000
context switches was reduced from 50 to 45 seconds.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2009-09-26 00:25:10 +07:00
|
|
|
/* No hardware FPU configured: all FPU management operations are no-ops. */
#define save_fpu(tsk)			do { } while (0)
#define release_fpu(regs)		do { } while (0)
#define grab_fpu(regs)			do { } while (0)
#define fpu_state_restore(regs)		do { } while (0)
|
2008-03-26 17:09:21 +07:00
|
|
|
|
2007-11-22 15:30:50 +07:00
|
|
|
#endif
|
|
|
|
|
2008-09-21 17:04:55 +07:00
|
|
|
struct user_regset;
|
|
|
|
|
2007-11-26 18:38:36 +07:00
|
|
|
extern int do_fpu_inst(unsigned short, struct pt_regs *);
|
|
|
|
|
2008-09-21 17:04:55 +07:00
|
|
|
extern int fpregs_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf);
|
|
|
|
|
sh: Minor optimisations to FPU handling
A number of small optimisations to FPU handling, in particular:
- move the task USEDFPU flag from the thread_info flags field (which
is accessed asynchronously to the thread) to a new status field,
which is only accessed by the thread itself. This allows locking to
be removed in most cases, or can be reduced to a preempt_lock().
This mimics the i386 behaviour.
- move the modification of regs->sr and thread_info->status flags out
of save_fpu() to __unlazy_fpu(). This gives the compiler a better
chance to optimise things, as well as making save_fpu() symmetrical
with restore_fpu() and init_fpu().
- implement prepare_to_copy(), so that when creating a thread, we can
unlazy the FPU prior to copying the thread data structures.
Also make sure that the FPU is disabled while in the kernel, in
particular while booting, and for newly created kernel threads.
In a very artificial benchmark, the execution time for 2500000
context switches was reduced from 50 to 45 seconds.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2009-09-26 00:25:10 +07:00
|
|
|
static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
if (task_thread_info(tsk)->status & TS_USEDFPU) {
|
|
|
|
task_thread_info(tsk)->status &= ~TS_USEDFPU;
|
|
|
|
save_fpu(tsk);
|
|
|
|
release_fpu(regs);
|
|
|
|
} else
|
|
|
|
tsk->fpu_counter = 0;
|
|
|
|
}
|
|
|
|
|
2008-03-26 17:02:47 +07:00
|
|
|
/*
 * Preempt-safe wrapper around __unlazy_fpu(): save @tsk's live FPU
 * state (if any) with preemption disabled, so the thread cannot be
 * scheduled away in the middle of the save.
 */
static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
	preempt_disable();
	__unlazy_fpu(tsk, regs);
	preempt_enable();
}
|
|
|
|
|
|
|
|
static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
preempt_disable();
|
sh: Minor optimisations to FPU handling
A number of small optimisations to FPU handling, in particular:
- move the task USEDFPU flag from the thread_info flags field (which
is accessed asynchronously to the thread) to a new status field,
which is only accessed by the thread itself. This allows locking to
be removed in most cases, or can be reduced to a preempt_lock().
This mimics the i386 behaviour.
- move the modification of regs->sr and thread_info->status flags out
of save_fpu() to __unlazy_fpu(). This gives the compiler a better
chance to optimise things, as well as making save_fpu() symmetrical
with restore_fpu() and init_fpu().
- implement prepare_to_copy(), so that when creating a thread, we can
unlazy the FPU prior to copying the thread data structures.
Also make sure that the FPU is disabled while in the kernel, in
particular while booting, and for newly created kernel threads,
In a very artificial benchmark, the execution time for 2500000
context switches was reduced from 50 to 45 seconds.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2009-09-26 00:25:10 +07:00
|
|
|
if (task_thread_info(tsk)->status & TS_USEDFPU) {
|
|
|
|
task_thread_info(tsk)->status &= ~TS_USEDFPU;
|
2008-03-26 17:02:47 +07:00
|
|
|
release_fpu(regs);
|
|
|
|
}
|
|
|
|
preempt_enable();
|
|
|
|
}
|
2007-11-22 15:30:50 +07:00
|
|
|
|
2008-09-21 17:04:55 +07:00
|
|
|
static inline int init_fpu(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
if (tsk_used_math(tsk)) {
|
|
|
|
if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
|
|
|
|
unlazy_fpu(tsk, task_pt_regs(tsk));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
set_stopped_child_used_math(tsk);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-11-22 15:30:50 +07:00
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
|
|
|
|
#endif /* __ASM_SH_FPU_H */
|