perf: Migrate perf to use new tick dependency mask model
Instead of providing asynchronous checks for the nohz subsystem to verify
perf event tick dependency, migrate perf to the new mask.

Perf needs the tick for two situations:

1) Freq events. We could set the tick dependency when those are
   installed on a CPU context. But setting a global dependency on top of
   the global freq events accounting is much easier. If people want that
   to be optimized, we can still refine it on the per-CPU tick dependency
   level. This patch doesn't change the current behaviour anyway.

2) Throttled events: this is a per-CPU dependency.

Reviewed-by: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>

parent e6e6cc22e0
commit 555e0c1ef7
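
Editorial note (not part of the commit): the changelog distinguishes a global tick dependency (freq events) from a per-CPU one (throttled events). The standalone C11 sketch below models that split with one system-wide mask and one mask per CPU. The bit name mirrors the patch; the numeric bit value, helper names and NR_CPUS are illustrative only, not the kernel's API.

/* Standalone model of the tick dependency mask idea; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TICK_DEP_BIT_PERF_EVENTS 1		/* illustrative bit number */
#define NR_CPUS 4

static atomic_ulong global_dep;			/* system-wide dependencies */
static atomic_ulong percpu_dep[NR_CPUS];	/* per-CPU dependencies */

/* Global flavour: e.g. the freq events accounting in this patch. */
static void dep_set(int bit)	{ atomic_fetch_or(&global_dep, 1UL << bit); }
static void dep_clear(int bit)	{ atomic_fetch_and(&global_dep, ~(1UL << bit)); }

/* Per-CPU flavour: e.g. the throttled-events path in this patch. */
static void dep_set_cpu(int cpu, int bit)
{
	atomic_fetch_or(&percpu_dep[cpu], 1UL << bit);
}

static void dep_clear_cpu(int cpu, int bit)
{
	atomic_fetch_and(&percpu_dep[cpu], ~(1UL << bit));
}

/* A CPU may stop its tick only if no global and no local dependency is set. */
static bool can_stop_tick(int cpu)
{
	return atomic_load(&global_dep) == 0 && atomic_load(&percpu_dep[cpu]) == 0;
}

int main(void)
{
	dep_set(TICK_DEP_BIT_PERF_EVENTS);		/* first freq event anywhere */
	dep_set_cpu(1, TICK_DEP_BIT_PERF_EVENTS);	/* CPU 1 got throttled */

	printf("cpu0: %d\n", can_stop_tick(0));		/* 0 - global dependency */
	dep_clear(TICK_DEP_BIT_PERF_EVENTS);		/* last freq event removed */
	printf("cpu0: %d\n", can_stop_tick(0));		/* 1 - nothing pins cpu0 */
	printf("cpu1: %d\n", can_stop_tick(1));		/* 0 - still throttled */
	return 0;
}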
@@ -1108,12 +1108,6 @@ static inline void perf_event_task_tick(void) { }
 static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
 #endif
 
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
-extern bool perf_event_can_stop_tick(void);
-#else
-static inline bool perf_event_can_stop_tick(void) { return true; }
-#endif
-
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
 extern void perf_restore_debug_store(void);
 #else
@@ -233,7 +233,6 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal,
 	tick_nohz_dep_clear_signal(signal, bit);
 }
 
-extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(void);
@@ -260,7 +259,6 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal,
					  enum tick_dep_bits bit) { }
 
 static inline void tick_nohz_full_kick_cpu(int cpu) { }
-static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(void) { }
 #endif
@@ -3060,17 +3060,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 	return rotate;
 }
 
-#ifdef CONFIG_NO_HZ_FULL
-bool perf_event_can_stop_tick(void)
-{
-	if (atomic_read(&nr_freq_events) ||
-	    __this_cpu_read(perf_throttled_count))
-		return false;
-	else
-		return true;
-}
-#endif
-
 void perf_event_task_tick(void)
 {
 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
@@ -3081,6 +3070,7 @@ void perf_event_task_tick(void)
 
 	__this_cpu_inc(perf_throttled_seq);
 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
+	tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 
 	list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
 		perf_adjust_freq_unthr_context(ctx, throttled);
@@ -3511,6 +3501,28 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static DEFINE_SPINLOCK(nr_freq_lock);
+#endif
+
+static void unaccount_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	spin_lock(&nr_freq_lock);
+	if (atomic_dec_and_test(&nr_freq_events))
+		tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
+	spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void unaccount_freq_event(void)
+{
+	if (tick_nohz_full_enabled())
+		unaccount_freq_event_nohz();
+	else
+		atomic_dec(&nr_freq_events);
+}
+
 static void unaccount_event(struct perf_event *event)
 {
 	bool dec = false;
@@ -3527,7 +3539,7 @@ static void unaccount_event(struct perf_event *event)
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
 	if (event->attr.freq)
-		atomic_dec(&nr_freq_events);
+		unaccount_freq_event();
 	if (event->attr.context_switch) {
 		dec = true;
 		atomic_dec(&nr_switch_events);
@@ -6349,9 +6361,9 @@ static int __perf_event_overflow(struct perf_event *event,
 		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
+			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
-			tick_nohz_full_kick();
			ret = 1;
		}
	}
@@ -7741,6 +7753,27 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
 }
 
+/* Freq events need the tick to stay alive (see perf_event_task_tick). */
+static void account_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	/* Lock so we don't race with concurrent unaccount */
+	spin_lock(&nr_freq_lock);
+	if (atomic_inc_return(&nr_freq_events) == 1)
+		tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
+	spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void account_freq_event(void)
+{
+	if (tick_nohz_full_enabled())
+		account_freq_event_nohz();
+	else
+		atomic_inc(&nr_freq_events);
+}
+
+
 static void account_event(struct perf_event *event)
 {
 	bool inc = false;
@@ -7756,10 +7789,8 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (event->attr.freq) {
-		if (atomic_inc_return(&nr_freq_events) == 1)
-			tick_nohz_full_kick_all();
-	}
+	if (event->attr.freq)
+		account_freq_event();
 	if (event->attr.context_switch) {
 		atomic_inc(&nr_switch_events);
 		inc = true;
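Editorial note on the locking above (not part of the patch): nr_freq_events is already an atomic counter, yet account_freq_event_nohz()/unaccount_freq_event_nohz() wrap it in nr_freq_lock. The counter transition and the dependency-bit update are two separate steps; without the lock, a concurrent 0 -> 1 transition could set the bit only to have a racing 1 -> 0 path clear it afterwards, losing the dependency. A minimal C11 model of the protected pattern, with a pthread mutex standing in for the kernel spinlock and the mask kept as a plain word (names and bit value are assumptions for illustration):

#include <pthread.h>
#include <stdatomic.h>

#define TICK_DEP_BIT_PERF_EVENTS 1	/* illustrative bit number */

static atomic_int nr_freq_events;
static atomic_ulong tick_dep_mask;
static pthread_mutex_t nr_freq_lock = PTHREAD_MUTEX_INITIALIZER;

static void account_freq_event_nohz(void)
{
	pthread_mutex_lock(&nr_freq_lock);
	/* 0 -> 1: first freq event in the system, pin the tick everywhere */
	if (atomic_fetch_add(&nr_freq_events, 1) == 0)
		atomic_fetch_or(&tick_dep_mask, 1UL << TICK_DEP_BIT_PERF_EVENTS);
	pthread_mutex_unlock(&nr_freq_lock);
}

static void unaccount_freq_event_nohz(void)
{
	pthread_mutex_lock(&nr_freq_lock);
	/* 1 -> 0: last freq event gone, the tick may be stopped again */
	if (atomic_fetch_sub(&nr_freq_events, 1) == 1)
		atomic_fetch_and(&tick_dep_mask, ~(1UL << TICK_DEP_BIT_PERF_EVENTS));
	pthread_mutex_unlock(&nr_freq_lock);
}

int main(void)
{
	account_freq_event_nohz();	/* sets the dependency bit */
	unaccount_freq_event_nohz();	/* clears it again */
	return 0;
}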
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
-#include <linux/perf_event.h>
 #include <linux/context_tracking.h>
 
 #include <asm/irq_regs.h>
@@ -215,11 +214,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
 		return false;
 	}
 
-	if (!perf_event_can_stop_tick()) {
-		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-		return false;
-	}
-
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 	/*
	 * sched_clock_tick() needs us?
@@ -257,7 +251,7 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
  * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
  * is NMI safe.
  */
-void tick_nohz_full_kick(void)
+static void tick_nohz_full_kick(void)
 {
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;