#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

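/*
 * Descriptive note (added): the counters below are the x86 per-vector
 * interrupt statistics that the architecture code reports in
 * /proc/interrupts and folds into /proc/stat via arch_irq_stat_cpu()
 * and arch_irq_stat().
 */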
typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;
#endif
#ifdef CONFIG_HAVE_KVM
	unsigned int kvm_posted_intr_ipis;
	unsigned int kvm_posted_intr_wakeup_ipis;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	/*
	 * irq_tlb_count is double-counted in irq_call_count, so it must be
	 * subtracted from irq_call_count when displaying irq_call_count
	 */
	unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE_AMD
	unsigned int irq_deferred_error_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	unsigned int irq_hv_callback_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

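/*
 * Descriptive note (added): each CPU gets its own cacheline-aligned
 * irq_stat instance, so bumping a counter never touches a cacheline
 * shared with another CPU's statistics or with neighbouring per-CPU
 * data.
 */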
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)

#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	\
		this_cpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
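
/*
 * Illustrative usage (added, not part of the original header):
 * interrupt handlers bump their per-CPU counter with inc_irq_stat(),
 * e.g. the local APIC timer handler does
 * inc_irq_stat(apic_timer_irqs), while the core softirq code uses the
 * *_softirq_pending() helpers above to maintain the per-CPU pending
 * mask because __ARCH_SET_SOFTIRQ_PENDING is defined here.
 */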

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */