Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 06:50:58 +07:00)
Merge branch 'timers/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/urgent
Pull dynticks cleanups from Frederic Weisbecker.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit a2b4c607c9
include/linux/jiffies.h
@@ -294,6 +294,12 @@ extern unsigned long preset_lpj;
  */
 extern unsigned int jiffies_to_msecs(const unsigned long j);
 extern unsigned int jiffies_to_usecs(const unsigned long j);
+
+static inline u64 jiffies_to_nsecs(const unsigned long j)
+{
+	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
+}
+
 extern unsigned long msecs_to_jiffies(const unsigned int m);
 extern unsigned long usecs_to_jiffies(const unsigned int u);
 extern unsigned long timespec_to_jiffies(const struct timespec *value);
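For readers who want to see what the new helper computes, here is a stand-alone userspace model of the arithmetic (a sketch, not kernel code; HZ is fixed at 1000 and jiffies_to_usecs() is simplified to the exact-divisor case purely for illustration):

/* Userspace model of the jiffies_to_nsecs() helper added above. */
#include <stdint.h>
#include <stdio.h>

#define HZ		1000u		/* assumed tick rate, illustration only */
#define USEC_PER_SEC	1000000u
#define NSEC_PER_USEC	1000u

/* Simplified jiffies_to_usecs(): valid only when USEC_PER_SEC % HZ == 0 */
static unsigned int jiffies_to_usecs(unsigned long j)
{
	return (USEC_PER_SEC / HZ) * j;
}

/* Mirrors the new kernel helper: widen to 64 bits, then scale to ns */
static uint64_t jiffies_to_nsecs(unsigned long j)
{
	return (uint64_t)jiffies_to_usecs(j) * NSEC_PER_USEC;
}

int main(void)
{
	unsigned long j = 250;	/* 250 ticks at HZ=1000 -> 250 ms */

	printf("%lu jiffies = %u us = %llu ns\n", j, jiffies_to_usecs(j),
	       (unsigned long long)jiffies_to_nsecs(j));
	return 0;
}

The point of the helper is visible further down in this diff: scheduler_tick_max_deferment() previously open-coded the same widen-and-scale expression and can now simply call jiffies_to_nsecs().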
include/linux/tick.h
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(void);
+extern void tick_irq_enter(void);
 extern int tick_oneshot_mode_active(void);
 # ifndef arch_needs_cpu
 # define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(void) { }
+static inline void tick_irq_enter(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 # endif
 
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(void) { }
+static inline void tick_irq_enter(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
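The three tick.h hunks above all touch the same header idiom, which the rename has to keep intact: when the relevant tick infrastructure is built in, callers see a real extern declaration; when it is configured out, they see an empty static inline stub so that call sites such as irq_enter() in kernel/softirq.c compile unchanged. A minimal self-contained sketch of that idiom follows; CONFIG_FEATURE_X and feature_irq_enter() are invented names for illustration, not kernel symbols:

/* Build with -DCONFIG_FEATURE_X to get the real hook, without it to get
 * the no-op stub. Call sites stay free of #ifdefs either way. */
#include <stdio.h>

#ifdef CONFIG_FEATURE_X
extern void feature_irq_enter(void);		/* real implementation below */
#else
static inline void feature_irq_enter(void) { }	/* configured out: no-op */
#endif

#ifdef CONFIG_FEATURE_X
void feature_irq_enter(void)
{
	puts("feature hook ran");
}
#endif

int main(void)
{
	feature_irq_enter();	/* no #ifdef needed at the call site */
	return 0;
}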
kernel/sched/core.c
@@ -2453,7 +2453,7 @@ u64 scheduler_tick_max_deferment(void)
 	if (time_before_eq(next, now))
 		return 0;
 
-	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+	return jiffies_to_nsecs(next - now);
 }
 #endif
 
kernel/softirq.c
@@ -326,7 +326,7 @@ void irq_enter(void)
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
 		local_bh_disable();
-		tick_check_idle();
+		tick_irq_enter();
 		_local_bh_enable();
 	}
 
kernel/time/tick-sched.c
@@ -533,12 +533,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 
+	time_delta = timekeeping_max_deferment();
+
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
-		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
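The hunk above hoists timekeeping_max_deferment() out of the jiffies_lock read-side retry loop: everything inside a read_seqbegin()/read_seqretry() section may be re-executed whenever a writer races with the reader, so only data actually protected by jiffies_lock belongs there, and timekeeping_max_deferment() (which does its own locking) does not. Below is a stand-alone userspace model of that read-side pattern; the seqcount implementation, the jiffies variables and the stand-in deferment value are all invented for illustration:

/* Minimal single-threaded model of the seqcount read-side loop used above.
 * Real kernel readers spin against concurrent writers; here we only show
 * the shape of the loop and what belongs inside it. */
#include <stdatomic.h>
#include <stdio.h>

struct seqcount {
	atomic_uint sequence;	/* even = stable, odd = write in progress */
};

static unsigned int read_seqbegin(struct seqcount *s)
{
	unsigned int seq;

	/* Wait until no writer is in progress (sequence is even) */
	while ((seq = atomic_load_explicit(&s->sequence,
					   memory_order_acquire)) & 1)
		;
	return seq;
}

static int read_seqretry(struct seqcount *s, unsigned int start)
{
	/* Retry if a writer ran while we were reading */
	return atomic_load_explicit(&s->sequence,
				    memory_order_acquire) != start;
}

int main(void)
{
	struct seqcount jiffies_seq = { 0 };
	unsigned long jiffies = 100000, last_jiffies;
	long long last_jiffies_update = 123456789LL, last_update;
	unsigned int seq;

	/* Independently locked / expensive work stays outside the loop ... */
	long long time_delta = 1000000000LL;	/* stand-in for timekeeping_max_deferment() */

	/* ... and only the seqcount-protected snapshot is taken inside it */
	do {
		seq = read_seqbegin(&jiffies_seq);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&jiffies_seq, seq));

	printf("last_jiffies=%lu last_update=%lld time_delta=%lld\n",
	       last_jiffies, last_update, time_delta);
	return 0;
}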
@@ -678,18 +679,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
-        int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-        if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
-                return;
+	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+		return;
 
-        if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
-                return;
+	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
+		return;
 
-        if (!can_stop_full_tick())
-                return;
+	if (!can_stop_full_tick())
+		return;
 
-        tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 #endif
 }
 
@@ -1023,7 +1024,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz_this_cpu(void)
+static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
@@ -1042,17 +1043,17 @@ static inline void tick_check_nohz_this_cpu(void)
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz_this_cpu(void) { }
+static inline void tick_nohz_irq_enter(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(void)
+void tick_irq_enter(void)
 {
 	tick_check_oneshot_broadcast_this_cpu();
-	tick_check_nohz_this_cpu();
+	tick_nohz_irq_enter();
 }
 
 /*