/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
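
/*
 * Report the syscall and its arguments to the audit subsystem, using the
 * argument-register convention that matches the audit arch.
 */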
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

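	/* Snapshot the syscall-entry work flags once with READ_ONCE(). */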
	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

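/*
 * Thread flags that are handled, and then re-checked, by the loop in
 * exit_to_usermode_loop() before returning to user mode.
 */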
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

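/*
 * One-shot syscall-exit work: tracing, audit and single-step reporting.
 * Runs with IRQs on, before the common exit-to-usermode path.
 */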
#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
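/*
 * The C portion of the 64-bit syscall entry path.  Called from the
 * SYSCALL entry asm with IRQs off; 'nr' is the syscall number and
 * 'regs' points at the saved user register state.
 */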
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
#endif

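/* 32-bit syscall entry points: native 32-bit kernels and IA32 emulation. */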
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
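		/*
		 * Under IA32 emulation the compat syscall stubs take pt_regs
		 * and decode their own arguments.
		 */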
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif