Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-02 02:16:37 +07:00
99e5ada940
When a task is scheduled in, it may have properties of its own that could make the CPU reconsider the need for the tick: POSIX CPU timers, perf events, ... So notify the full dynticks subsystem when a task gets scheduled in and re-check the tick dependency at this stage. This is done through a self IPI so that the re-check does not interfere with any locks currently held.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
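In code terms, the change boils down to a hook on the scheduled-in path: if the current CPU is a full dynticks CPU whose tick is already stopped, it kicks itself with an IPI so that the tick dependencies (POSIX CPU timers, perf events, ...) are re-evaluated from interrupt context rather than under the scheduler's locks. Below is a minimal sketch of such a hook, built only on helpers declared in the header that follows; the body is illustrative and not necessarily the exact upstream implementation.

/* Sketch of the scheduled-in hook described above (illustrative only). */
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/smp.h>

void tick_nohz_task_switch(struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);

        /* Only full dynticks CPUs ever keep the tick stopped while busy. */
        if (!tick_nohz_full_cpu(smp_processor_id()))
                goto out;

        /*
         * The tick is stopped and a real task was just scheduled in:
         * send a self IPI. The IRQ path then re-checks the tick
         * dependency and restarts the tick if the new task needs it,
         * without taking timer locks while scheduler locks are held.
         */
        if (tick_nohz_tick_stopped() && !is_idle_task(tsk))
                tick_nohz_full_kick();

out:
        local_irq_restore(flags);
}

The scheduler would invoke such a hook right after a context switch completes, which is exactly why the self IPI is used: the kick must not grab locks that the scheduler may still be holding at that point.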
184 lines
5.6 KiB
C
/* linux/include/linux/tick.h
 *
 * This file contains the structure definitions for tick related functions
 *
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS

enum tick_device_mode {
        TICKDEV_MODE_PERIODIC,
        TICKDEV_MODE_ONESHOT,
};

struct tick_device {
        struct clock_event_device *evtdev;
        enum tick_device_mode mode;
};

enum tick_nohz_mode {
        NOHZ_MODE_INACTIVE,
        NOHZ_MODE_LOWRES,
        NOHZ_MODE_HIGHRES,
};

/**
 * struct tick_sched - sched tick emulation and no idle tick control/stats
 * @sched_timer:        hrtimer to schedule the periodic tick in high
 *                      resolution mode
 * @last_tick:          Store the last tick expiry time when the tick
 *                      timer is modified for nohz sleeps. This is necessary
 *                      to resume the tick timer operation in the timeline
 *                      when the CPU returns from nohz sleep.
 * @tick_stopped:       Indicator that the idle tick has been stopped
 * @idle_jiffies:       jiffies at the entry to idle for idle time accounting
 * @idle_calls:         Total number of idle calls
 * @idle_sleeps:        Number of idle calls, where the sched tick was stopped
 * @idle_entrytime:     Time when the idle call was entered
 * @idle_waketime:      Time when the idle was interrupted
 * @idle_exittime:      Time when the idle state was left
 * @idle_sleeptime:     Sum of the time slept in idle with sched tick stopped
 * @iowait_sleeptime:   Sum of the time slept in idle with sched tick stopped, with IO outstanding
 * @sleep_length:       Duration of the current idle sleep
 * @do_timer_last:      CPU was the last one doing do_timer before going idle
 */
struct tick_sched {
        struct hrtimer                  sched_timer;
        unsigned long                   check_clocks;
        enum tick_nohz_mode             nohz_mode;
        ktime_t                         last_tick;
        int                             inidle;
        int                             tick_stopped;
        unsigned long                   idle_jiffies;
        unsigned long                   idle_calls;
        unsigned long                   idle_sleeps;
        int                             idle_active;
        ktime_t                         idle_entrytime;
        ktime_t                         idle_waketime;
        ktime_t                         idle_exittime;
        ktime_t                         idle_sleeptime;
        ktime_t                         iowait_sleeptime;
        ktime_t                         sleep_length;
        unsigned long                   last_jiffies;
        unsigned long                   next_jiffies;
        ktime_t                         idle_expires;
        int                             do_timer_last;
};

extern void __init tick_init(void);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
extern int tick_init_highres(void);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_setup_sched_timer(void);
# endif

# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
extern void tick_cancel_sched_timer(int cpu);
# else
static inline void tick_cancel_sched_timer(int cpu) { }
# endif

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);

# ifdef CONFIG_TICK_ONESHOT
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
# endif

# endif /* BROADCAST */

# ifdef CONFIG_TICK_ONESHOT
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu(cpu) (0)
# endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif

#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

# ifdef CONFIG_NO_HZ_COMMON
DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);

static inline int tick_nohz_tick_stopped(void)
{
        return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);

# else /* !CONFIG_NO_HZ_COMMON */
static inline int tick_nohz_tick_stopped(void)
{
        return 0;
}

static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

static inline ktime_t tick_nohz_get_sleep_length(void)
{
        ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

        return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
extern void tick_nohz_init(void);
extern int tick_nohz_full_cpu(int cpu);
extern void tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_all(void);
extern void tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline void tick_nohz_init(void) { }
static inline int tick_nohz_full_cpu(int cpu) { return 0; }
static inline void tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
#endif


# ifdef CONFIG_CPU_IDLE_GOV_MENU
extern void menu_hrtimer_cancel(void);
# else
static inline void menu_hrtimer_cancel(void) {}
# endif /* CONFIG_CPU_IDLE_GOV_MENU */

#endif
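For context, the CONFIG_NO_HZ_COMMON entry points declared above are meant to bracket a CPU's idle loop: the tick may be stopped on the way into the low-power wait and is restarted, with idle/iowait time accounting, on the way out. A hypothetical, heavily simplified idle-loop fragment showing the intended call pattern (my_idle_loop and the busy-wait body are placeholders for illustration, not real kernel code):

#include <linux/tick.h>
#include <linux/sched.h>

static void my_idle_loop(void)          /* hypothetical, for illustration */
{
        while (1) {
                tick_nohz_idle_enter(); /* may stop the periodic tick */

                /* Placeholder for the architecture's low-power wait. */
                while (!need_resched())
                        cpu_relax();

                tick_nohz_idle_exit();  /* restart the tick, account idle time */
                schedule();             /* hand the CPU to the woken task */
        }
}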