mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-23 21:50:50 +07:00
kprobes: Fix to delay the kprobes jump optimization
commit c85c9a2c6e368dc94907e63babb18a9788e5c9b6 upstream. Commit 36dadef23f
("kprobes: Init kprobes in early_initcall") moved the kprobe setup in early_initcall(), which includes kprobe jump optimization. The kprobes jump optimizer involves synchronize_rcu_tasks() which depends on the ksoftirqd and rcu_spawn_tasks_*(). However, since those are set up in core_initcall(), the kprobes jump optimizer cannot run at the early_initcall(). To avoid this issue, disable the kprobe optimization in the early_initcall() and enable it in subsys_initcall(). Note that non-optimized kprobes are still available after early_initcall(). Only jump optimization is delayed. Link: https://lkml.kernel.org/r/161365856280.719838.12423085451287256713.stgit@devnote2 Fixes: 36dadef23f
("kprobes: Init kprobes in early_initcall") Cc: Ingo Molnar <mingo@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: RCU <rcu@vger.kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Daniel Axtens <dja@axtens.net> Cc: Frederic Weisbecker <frederic@kernel.org> Cc: Neeraj Upadhyay <neeraju@codeaurora.org> Cc: Joel Fernandes <joel@joelfernandes.org> Cc: Michal Hocko <mhocko@suse.com> Cc: "Theodore Y . Ts'o" <tytso@mit.edu> Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com> Cc: stable@vger.kernel.org Reported-by: Paul E. McKenney <paulmck@kernel.org> Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reported-by: Uladzislau Rezki <urezki@gmail.com> Acked-by: Paul E. McKenney <paulmck@kernel.org> Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
e713bdd791
commit
c9b33f7cbe
@ -871,7 +871,6 @@ static void try_to_optimize_kprobe(struct kprobe *p)
|
||||
cpus_read_unlock();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
static void optimize_all_kprobes(void)
|
||||
{
|
||||
struct hlist_head *head;
|
||||
@ -897,6 +896,7 @@ static void optimize_all_kprobes(void)
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
static void unoptimize_all_kprobes(void)
|
||||
{
|
||||
struct hlist_head *head;
|
||||
@ -2627,18 +2627,14 @@ static int __init init_kprobes(void)
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(CONFIG_OPTPROBES)
|
||||
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
|
||||
/* Init kprobe_optinsn_slots */
|
||||
kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
|
||||
#endif
|
||||
/* By default, kprobes can be optimized */
|
||||
kprobes_allow_optimization = true;
|
||||
#endif
|
||||
|
||||
/* By default, kprobes are armed */
|
||||
kprobes_all_disarmed = false;
|
||||
|
||||
#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
|
||||
/* Init kprobe_optinsn_slots for allocation */
|
||||
kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
|
||||
#endif
|
||||
|
||||
err = arch_init_kprobes();
|
||||
if (!err)
|
||||
err = register_die_notifier(&kprobe_exceptions_nb);
|
||||
@ -2653,6 +2649,21 @@ static int __init init_kprobes(void)
|
||||
}
|
||||
early_initcall(init_kprobes);
|
||||
|
||||
#if defined(CONFIG_OPTPROBES)
|
||||
static int __init init_optprobes(void)
|
||||
{
|
||||
/*
|
||||
* Enable kprobe optimization - this kicks the optimizer which
|
||||
* depends on synchronize_rcu_tasks() and ksoftirqd, that is
|
||||
* not spawned in early initcall. So delay the optimization.
|
||||
*/
|
||||
optimize_all_kprobes();
|
||||
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(init_optprobes);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
static void report_probe(struct seq_file *pi, struct kprobe *p,
|
||||
const char *sym, int offset, char *modname, struct kprobe *pp)
|
||||
|
Loading…
Reference in New Issue
Block a user