Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-26 03:40:55 +07:00)
Commit 79741dd357
The cpu time spent by the idle process actually doing something is currently accounted as idle time. This is plain wrong; the architectures that support VIRT_CPU_ACCOUNTING=y can do better and distinguish between the time spent doing nothing and the time spent by idle doing work. The first is accounted with account_idle_time and the second with account_system_time. The architectures that use the account_xxx_time interface directly, and not the account_xxx_ticks interface, now need to do the check for the idle process in their arch code. In particular, to improve the system vs. true idle time accounting the arch code needs to measure the true idle time instead of just testing for the idle process.

To improve the tick based accounting as well we would need an architecture primitive that can tell us whether the pt_regs of the interrupted context points to the magic instruction that halts the cpu.

In addition, idle time is no longer added to the stime of the idle process. This field now contains the system time of the idle process, as it should. On systems without VIRT_CPU_ACCOUNTING this will always be zero, as every tick that occurs while idle is running is accounted as idle time.

This patch contains the common code changes needed to distinguish idle system time from true idle time. The architectures with support for VIRT_CPU_ACCOUNTING need some changes to exploit this.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
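A minimal sketch of the resulting split, assuming an architecture with VIRT_CPU_ACCOUNTING=y that can measure how long the CPU was actually halted. The function name arch_account_kernel_time and the way the measured interval is already split into a "system" and a "halted" part are illustrative assumptions, not part of the patch:

#include <linux/kernel_stat.h>
#include <linux/sched.h>

/*
 * Hypothetical arch hook: "halted" is the part of the elapsed interval
 * the CPU really spent in its halt state, "system" is the part the idle
 * task spent doing work.  Only the halted part counts as idle time; the
 * rest is system time, even though current is the idle task.
 */
static void arch_account_kernel_time(struct task_struct *tsk,
                                     cputime_t system, cputime_t halted)
{
        if (halted)
                account_idle_time(halted);
        if (system)
                /* scaled time passed unscaled here for simplicity */
                account_system_time(tsk, 0, system, system);
}

Without such a measurement, i.e. on purely tick-based systems, this split cannot be made, which is why every tick that lands while the idle task runs is still accounted as idle time there.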
include/linux/kernel_stat.h (92 lines, 2.1 KiB, C)
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

struct cpu_usage_stat {
        cputime64_t user;
        cputime64_t nice;
        cputime64_t system;
        cputime64_t softirq;
        cputime64_t irq;
        cputime64_t idle;
        cputime64_t iowait;
        cputime64_t steal;
        cputime64_t guest;
};

struct kernel_stat {
        struct cpu_usage_stat cpustat;
#ifndef CONFIG_SPARSE_IRQ
        unsigned int irqs[NR_IRQS];
#endif
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu __get_cpu_var(kstat)

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_SPARSE_IRQ
#define kstat_irqs_this_cpu(irq) \
        (kstat_this_cpu.irqs[irq])

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
                                            struct irq_desc *desc)
{
        kstat_this_cpu.irqs[irq]++;
}
#endif


#ifndef CONFIG_SPARSE_IRQ
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        return kstat_cpu(cpu).irqs[irq];
}
#else
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#endif

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
static inline unsigned int kstat_irqs(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);

        return sum;
}

extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */
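The *_ticks helpers declared at the end of the header take whole-tick counts rather than cputime values. A hedged sketch of how a tick-based guest might catch up after a stretch of lost ticks; the function name and the split between stolen and halted ticks are assumptions for illustration, not code from this tree:

#include <linux/kernel_stat.h>

/*
 * Hypothetical catch-up path: of "lost" timer ticks, "stolen" were
 * consumed by the hypervisor and the remainder were spent halted.
 * The helpers convert tick counts to cputime and account them in bulk
 * instead of one call per tick.
 */
static void account_lost_ticks(unsigned long lost, unsigned long stolen)
{
        if (stolen)
                account_steal_ticks(stolen);
        if (lost > stolen)
                account_idle_ticks(lost - stolen);
}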