9b8de7479d
In non-SMP mode, the variable section attribute specified by DECLARE_PER_CPU() does not agree with the one specified by DEFINE_PER_CPU(). This means that architectures which reference a small data section relative to a base register may throw up linkage errors because the displacement between where the base register points and the per-CPU variable is too great.

On FRV, the .h declaration says that the variable is in the .sdata section, but the .c definition says it's actually in the .data section. The linker throws up the following errors:

kernel/built-in.o: In function `release_task':
kernel/exit.c:78: relocation truncated to fit: R_FRV_GPREL12 against symbol `per_cpu__process_counts' defined in .data section in kernel/built-in.o
kernel/exit.c:78: relocation truncated to fit: R_FRV_GPREL12 against symbol `per_cpu__process_counts' defined in .data section in kernel/built-in.o

To fix this, DECLARE_PER_CPU() should simply apply the same section attribute as DEFINE_PER_CPU() does. However, this is made slightly more complex by the fact that there are several variants of DEFINE, so these need to be matched by variants of DECLARE.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
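The core of the fix is that a per-CPU variable's declaration in a header must carry exactly the same section attribute as its definition in a .c file, for every DEFINE variant. Below is a minimal, self-contained sketch of that idea; the MY_DECLARE_PER_CPU/MY_DEFINE_PER_CPU macros, the ".data.percpu" section name and the process_counts variable are illustrative stand-ins, not the kernel's actual percpu macros:

/* Simplified stand-ins for the kernel's per-CPU macros.  The declaration
 * and the definition name the same linker section, so a base-register
 * relative relocation (e.g. R_FRV_GPREL12) sees the displacement it
 * expects.  If the header said ".sdata" while the .c file said ".data",
 * the linker could truncate the relocation, as in the errors above. */
#define MY_DECLARE_PER_CPU(type, name) \
        extern __attribute__((section(".data.percpu"))) type per_cpu__##name

#define MY_DEFINE_PER_CPU(type, name) \
        __attribute__((section(".data.percpu"))) type per_cpu__##name

/* In a shared header (declaration): */
MY_DECLARE_PER_CPU(unsigned long, process_counts);

/* In exactly one .c file (definition), using the matching variant: */
MY_DEFINE_PER_CPU(unsigned long, process_counts) = 0;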
54 lines · 1.4 KiB · C
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
        unsigned int __softirq_pending;
        unsigned int __nmi_count;       /* arch dependent */
        unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count;
#endif
        unsigned int generic_irqs;      /* arch dependent */
#ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
        unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
        unsigned int irq_thermal_count;
# ifdef CONFIG_X86_64
        unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)    percpu_add(irq_stat.member, 1)

#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)  percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)   percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu       arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat           arch_irq_stat

#endif  /* _ASM_X86_HARDIRQ_H */
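This header only declares irq_stat and provides the inc_irq_stat() accessor; the definition must live in exactly one .c file and, per the commit message above, must use the matching SHARED_ALIGNED variant so both sides end up in the same section. A hedged sketch of what that pairing and a call site could look like (the file placement and the count_timer_irq() helper are illustrative, not copied verbatim from the kernel sources):

/* In the arch's irq setup code (e.g. a file like arch/x86/kernel/irq.c):
 * the definition uses the same SHARED_ALIGNED variant as the
 * DECLARE_PER_CPU_SHARED_ALIGNED() in this header, so declaration and
 * definition agree on the section. */
#include <asm/hardirq.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* An interrupt path can then bump its per-CPU counter through the
 * inc_irq_stat() helper defined in this header. */
static void count_timer_irq(void)
{
        inc_irq_stat(irq0_irqs);
}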