mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-14 18:46:45 +07:00
69a37beabf
Predicting the future is difficult, and when the cpuidle governor's prediction fails, the governor may choose a shallower C-state than it should. Quickly noticing and finding such failures is important for power saving. The cpuidle menu governor has a method to predict a repeat pattern: if there are 8 C-state residencies which are consecutive and the same or very close, it predicts that the next C-state residency will have the same residency time. There is a real case in the turbostat utility (tools/power/x86/turbostat) at kernel 3.3 or earlier. turbostat reads 10 registers one by one on Sandybridge, so it generates 10 IPIs to wake up idle CPUs. The cpuidle menu governor therefore predicts repeat mode — that another IPI will wake the idle CPU soon — so it keeps the idle CPU in the C1 state even though the CPU is totally idle. However, in turbostat the next batch of 10 register reads follows a 5-second sleep by default, so the idle CPU stays at C1 for a long time, even though it is idle, until a break event occurs. On an idle Sandybridge system, running "./turbostat -v" shows the deep C-state dangling between 70% ~ 99%. After the kernel is patched, the deep C-state stays at >99.98%. In this patch, a timer is added when the menu governor detects a repeat mode and chooses a shallow C-state. The timer is set to a timeout value greater than the predicted time, and we conclude that the repeat-mode prediction failed if the timer fires. When repeat mode happens as expected, the timer does not fire: the CPU wakes from the C-state and cancels the timer itself. When repeat mode does not happen, the timer times out, and the menu governor quickly notices that the repeat-mode prediction failed and then re-evaluates deeper C-states. 
Below is another case which clearly shows the benefit of the patch: #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <signal.h> #include <sys/time.h> #include <time.h> #include <pthread.h> volatile int * shutdown; volatile long * count; int delay = 20; int loop = 8; void usage(void) { fprintf(stderr, "Usage: idle_predict [options]\n" " --help -h Print this help\n" " --thread -n Thread number\n" " --loop -l Loop times in shallow Cstate\n" " --delay -t Sleep time (uS)in shallow Cstate\n"); } void *simple_loop() { int idle_num = 1; while (!(*shutdown)) { *count = *count + 1; if (idle_num % loop) usleep(delay); else { /* sleep 1 second */ usleep(1000000); idle_num = 0; } idle_num++; } } static void sighand(int sig) { *shutdown = 1; } int main(int argc, char *argv[]) { sigset_t sigset; int signum = SIGALRM; int i, c, er = 0, thread_num = 8; pthread_t pt[1024]; static char optstr[] = "n:l:t:h:"; while ((c = getopt(argc, argv, optstr)) != EOF) switch (c) { case 'n': thread_num = atoi(optarg); break; case 'l': loop = atoi(optarg); break; case 't': delay = atoi(optarg); break; case 'h': default: usage(); exit(1); } printf("thread=%d,loop=%d,delay=%d\n",thread_num,loop,delay); count = malloc(sizeof(long)); shutdown = malloc(sizeof(int)); *count = 0; *shutdown = 0; sigemptyset(&sigset); sigaddset(&sigset, signum); sigprocmask (SIG_BLOCK, &sigset, NULL); signal(SIGINT, sighand); signal(SIGTERM, sighand); for(i = 0; i < thread_num ; i++) pthread_create(&pt[i], NULL, simple_loop, NULL); for (i = 0; i < thread_num; i++) pthread_join(pt[i], NULL); exit(0); } Get powertop V2 from git://github.com/fenrus75/powertop and build powertop. After building the above test application, run it. The test platform can be Intel Sandybridge or other recent platforms. #./idle_predict -l 10 & #./powertop We will find that the deep C-state dangles between 40%~100% and much time is spent in the C1 state. 
This is because the menu governor wrongly predicts that repeat mode continues, so it chooses the shallow C1 state even though it has the chance to sleep 1 second in a deep C-state. After the kernel is patched, we find that the deep C-state stays at >99.6%. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Youquan Song <youquan.song@intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
152 lines
4.7 KiB
C
152 lines
4.7 KiB
C
/* linux/include/linux/tick.h
 *
 * This file contains the structure definitions for tick related functions
 *
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS

/* Operating mode of a per-cpu tick device */
enum tick_device_mode {
	TICKDEV_MODE_PERIODIC,
	TICKDEV_MODE_ONESHOT,
};

/* Per-cpu tick device: the clock event device driving the tick and its mode */
struct tick_device {
	struct clock_event_device *evtdev;
	enum tick_device_mode mode;
};

/* NOHZ operating mode: inactive, or tick emulation in low/high resolution */
enum tick_nohz_mode {
	NOHZ_MODE_INACTIVE,
	NOHZ_MODE_LOWRES,
	NOHZ_MODE_HIGHRES,
};

/**
 * struct tick_sched - sched tick emulation and no idle tick control/stats
 * @sched_timer:	hrtimer to schedule the periodic tick in high
 *			resolution mode
 * @last_tick:		Store the last tick expiry time when the tick
 *			timer is modified for nohz sleeps. This is necessary
 *			to resume the tick timer operation in the timeline
 *			when the CPU returns from nohz sleep.
 * @tick_stopped:	Indicator that the idle tick has been stopped
 * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
 * @idle_calls:		Total number of idle calls
 * @idle_sleeps:	Number of idle calls, where the sched tick was stopped
 * @idle_entrytime:	Time when the idle call was entered
 * @idle_waketime:	Time when the idle was interrupted
 * @idle_exittime:	Time when the idle state was left
 * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
 * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
 * @sleep_length:	Duration of the current idle sleep
 * @do_timer_last:	CPU was the last one doing do_timer before going idle
 */
struct tick_sched {
	struct hrtimer			sched_timer;
	unsigned long			check_clocks;
	enum tick_nohz_mode		nohz_mode;
	ktime_t				last_tick;
	int				inidle;
	int				tick_stopped;
	unsigned long			idle_jiffies;
	unsigned long			idle_calls;
	unsigned long			idle_sleeps;
	int				idle_active;
	ktime_t				idle_entrytime;
	ktime_t				idle_waketime;
	ktime_t				idle_exittime;
	ktime_t				idle_sleeptime;
	ktime_t				iowait_sleeptime;
	ktime_t				sleep_length;
	unsigned long			last_jiffies;
	unsigned long			next_jiffies;
	ktime_t				idle_expires;
	int				do_timer_last;
};

extern void __init tick_init(void);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
extern int tick_init_highres(void);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_setup_sched_timer(void);
# endif

# if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
extern void tick_cancel_sched_timer(int cpu);
# else
static inline void tick_cancel_sched_timer(int cpu) { }
# endif

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);

#  ifdef CONFIG_TICK_ONESHOT
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#  endif

# endif /* BROADCAST */

# ifdef CONFIG_TICK_ONESHOT
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
extern int tick_oneshot_mode_active(void);
#  ifndef arch_needs_cpu
#   define arch_needs_cpu(cpu) (0)
#  endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif

#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

# ifdef CONFIG_NO_HZ
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
# else
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

static inline ktime_t tick_nohz_get_sleep_length(void)
{
	/* With the periodic tick, the sleep length is always one tick. */
	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

	return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !NO_HZ */

# ifdef CONFIG_CPU_IDLE_GOV_MENU
extern void menu_hrtimer_cancel(void);
# else
static inline void menu_hrtimer_cancel(void) {}
# endif /* CONFIG_CPU_IDLE_GOV_MENU */

#endif
|