mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-04 18:26:41 +07:00
fb8b049c98
This is one more step toward converting cputime accounting to pure nsecs. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Stanislaw Gruszka <sgruszka@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Wanpeng Li <wanpeng.li@hotmail.com> Link: http://lkml.kernel.org/r/1485832191-26889-25-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
100 lines | 2.5 KiB | C
#ifndef _LINUX_KERNEL_STAT_H
|
|
#define _LINUX_KERNEL_STAT_H
|
|
|
|
#include <linux/smp.h>
|
|
#include <linux/threads.h>
|
|
#include <linux/percpu.h>
|
|
#include <linux/cpumask.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/vtime.h>
|
|
#include <asm/irq.h>
|
|
|
|
/*
|
|
* 'kernel_stat.h' contains the definitions needed for doing
|
|
* some kernel statistics (CPU usage, context switches ...),
|
|
* used by rstatd/perfmeter
|
|
*/
|
|
|
|
/*
 * Indexes into kernel_cpustat.cpustat[], one slot per CPU time
 * accounting category.  Do not reorder: the numeric values index
 * the per-cpu array, and NR_STATS must stay last — it is the
 * array size.
 */
enum cpu_usage_stat {
	CPUTIME_USER,		/* user mode */
	CPUTIME_NICE,		/* user mode, niced (low priority) */
	CPUTIME_SYSTEM,		/* kernel mode */
	CPUTIME_SOFTIRQ,	/* servicing softirqs */
	CPUTIME_IRQ,		/* servicing hardirqs */
	CPUTIME_IDLE,		/* idle */
	CPUTIME_IOWAIT,		/* idle while waiting on I/O */
	CPUTIME_STEAL,		/* time taken by the hypervisor */
	CPUTIME_GUEST,		/* running a guest */
	CPUTIME_GUEST_NICE,	/* running a niced guest */
	NR_STATS,		/* number of categories — keep last */
};
/*
 * Per-CPU cputime accounting, indexed by enum cpu_usage_stat.
 * Values are u64 time accumulators (nanoseconds, per the cputime
 * nsec conversion this header belongs to).
 */
struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};
/*
 * Per-CPU interrupt statistics.
 */
struct kernel_stat {
	unsigned long irqs_sum;			/* total interrupts since boot */
	unsigned int softirqs[NR_SOFTIRQS];	/* per-softirq counts since boot */
};
/* Per-CPU instances; defined in the scheduler core. */
DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)

/* Remote-CPU accessors; evaluate to the named per-cpu variable (lvalue). */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
/* Total context switches across all CPUs since boot. */
extern unsigned long long nr_context_switches(void);

/* Count of @irq on @cpu; bump this CPU's count for @irq. */
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
/*
 * Bump this CPU's count for softirq number @irq.
 * Uses the raw __this_cpu op — NOTE(review): presumably callers run
 * with preemption disabled (softirq context); confirm at call sites.
 */
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}
static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
|
|
{
|
|
return kstat_cpu(cpu).softirqs[irq];
|
|
}
|
|
|
|
/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs(unsigned int irq);
/* NOTE(review): _usr presumably the variant safe outside irq context — confirm in kernel/irq. */
extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
|
|
* Number of interrupts per cpu, since bootup
|
|
*/
|
|
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
|
|
{
|
|
return kstat_cpu(cpu).irqs_sum;
|
|
}
|
|
|
|
/*
 * Cputime accounting hooks.  All time arguments are u64 values in
 * nanoseconds (this header is part of the cputime-to-nsecs
 * conversion, per the accompanying commit message).
 */
/* Charge user-mode time to @p. */
extern void account_user_time(struct task_struct *, u64);
/* Charge guest-execution time to @p. */
extern void account_guest_time(struct task_struct *, u64);
/* Charge system time to @p; the int selects the hardirq/softirq offset. */
extern void account_system_time(struct task_struct *, int, u64);
/* Charge system time to @p against an explicit cpustat index. */
extern void account_system_index_time(struct task_struct *, u64,
				enum cpu_usage_stat);
/* Charge hypervisor-stolen time. */
extern void account_steal_time(u64);
/* Charge idle time. */
extern void account_idle_time(u64);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * With native vtime accounting, time is charged as it is spent, so
 * the periodic tick only needs to flush pending vtime for @tsk;
 * @user is unused in this configuration.
 */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
/* Tick-based accounting: charge one tick to @tsk (user or system per @user). */
extern void account_process_tick(struct task_struct *, int user);
#endif
/* Account @ticks whole ticks of idle time in one batch. */
extern void account_idle_ticks(unsigned long ticks);
#endif /* _LINUX_KERNEL_STAT_H */
|