mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 16:30:52 +07:00
[PARISC] Add IRQ affinities
This really only adds them for the machines I can check SMP on, which is CPU interrupts and IOSAPIC (so not any of the GSC based machines). With this patch, irqbalanced can be used to maintain irq balancing. Unfortunately, irqbalanced is a bit x86 centric, so it doesn't do an incredibly good job, but it does work. Signed-off-by: James Bottomley <jejb@parisc-linux.org> Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
This commit is contained in:
parent
1d4c452a85
commit
c2ab64d098
@ -30,6 +30,7 @@
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include <asm/smp.h>
|
||||
|
||||
@ -84,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
|
||||
void no_ack_irq(unsigned int irq) { }
|
||||
void no_end_irq(unsigned int irq) { }
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
 * Validate a requested IRQ affinity mask, collapsing it to one CPU.
 *
 * The timer and IPI interrupts must be delivered to every CPU, so any
 * attempt to rebind them is refused (and the already-written mask is
 * restored to all CPUs).  For every other interrupt only a single
 * destination CPU is supported: the mask is rewritten in place to
 * contain just the first CPU set in the request.
 *
 * Returns 0 on success, -EINVAL if the IRQ may not be rebound.
 */
int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
{
	int target;

	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
		/* The generic IRQ layer has already stored the new mask
		 * before calling us, so we must put back the all-CPUs
		 * mask ourselves when rejecting the request. */
		irq_affinity[irq] = CPU_MASK_ALL;
		return -EINVAL;
	}

	/* Whatever mask the caller supplied, allow exactly one CPU. */
	target = first_cpu(*dest);
	*dest = cpumask_of_cpu(target);

	return 0;
}
|
||||
|
||||
/*
 * hw_interrupt_type .set_affinity hook for the CPU interrupt type.
 *
 * Runs the request through cpu_check_affinity(), which either rejects
 * it (timer/IPI) or narrows it to a single CPU; only a validated mask
 * is recorded in irq_affinity[].
 */
static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
	if (cpu_check_affinity(irq, &dest) != 0)
		return;

	irq_affinity[irq] = dest;
}
|
||||
#endif
|
||||
|
||||
static struct hw_interrupt_type cpu_interrupt_type = {
|
||||
.typename = "CPU",
|
||||
.startup = cpu_startup_irq,
|
||||
@ -92,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
|
||||
.disable = cpu_disable_irq,
|
||||
.ack = no_ack_irq,
|
||||
.end = no_end_irq,
|
||||
// .set_affinity = cpu_set_affinity_irq,
|
||||
#ifdef CONFIG_SMP
|
||||
.set_affinity = cpu_set_affinity_irq,
|
||||
#endif
|
||||
};
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
@ -229,6 +261,13 @@ int txn_alloc_irq(unsigned int bits_wide)
|
||||
return -1;
|
||||
}
|
||||
|
||||
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
|
||||
{
|
||||
irq_affinity[irq] = cpumask_of_cpu(cpu);
|
||||
|
||||
return cpu_data[cpu].txn_addr;
|
||||
}
|
||||
|
||||
unsigned long txn_alloc_addr(unsigned int virt_irq)
|
||||
{
|
||||
static int next_cpu = -1;
|
||||
@ -243,7 +282,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
|
||||
if (next_cpu >= NR_CPUS)
|
||||
next_cpu = 0; /* nothing else, assign monarch */
|
||||
|
||||
return cpu_data[next_cpu].txn_addr;
|
||||
return txn_affinity_addr(virt_irq, next_cpu);
|
||||
}
|
||||
|
||||
|
||||
@ -282,12 +321,29 @@ void do_cpu_irq_mask(struct pt_regs *regs)
|
||||
|
||||
/* Work our way from MSb to LSb...same order we alloc EIRs */
|
||||
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
|
||||
cpumask_t dest = irq_affinity[irq];
|
||||
|
||||
if (!(bit & eirr_val))
|
||||
continue;
|
||||
|
||||
/* clear bit in mask - can exit loop sooner */
|
||||
eirr_val &= ~bit;
|
||||
|
||||
/* FIXME: because generic set affinity mucks
|
||||
* with the affinity before sending it to us
|
||||
* we can get the situation where the affinity is
|
||||
* wrong for our CPU type interrupts */
|
||||
if (irq != TIMER_IRQ && irq != IPI_IRQ &&
|
||||
!cpu_isset(smp_processor_id(), dest)) {
|
||||
int cpu = first_cpu(dest);
|
||||
|
||||
printk("rethrowing irq %d from %d to %d\n",
|
||||
irq, smp_processor_id(), cpu);
|
||||
gsc_writel(irq + CPU_IRQ_BASE,
|
||||
cpu_data[cpu].hpa);
|
||||
continue;
|
||||
}
|
||||
|
||||
__do_IRQ(irq, regs);
|
||||
}
|
||||
}
|
||||
|
@ -700,6 +700,28 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
 * hw_interrupt_type .set_affinity hook for the IOSAPIC interrupt type.
 *
 * Validates/narrows the mask via cpu_check_affinity(), points the
 * vector's transaction address at the chosen CPU, then rewrites the
 * IRT entry under iosapic_lock so the hardware routes the interrupt
 * to that CPU.  Only word 1 of the entry carries the destination, so
 * word 0 is read and written back unchanged.
 */
static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
	struct vector_info *vip = iosapic_get_vector(irq);
	u32 word0, word1, scratch_w0;
	unsigned long flags;

	if (cpu_check_affinity(irq, &dest) != 0)
		return;

	vip->txn_addr = txn_affinity_addr(irq, first_cpu(dest));

	spin_lock_irqsave(&iosapic_lock, flags);
	/* Only word 1 holds the destination CPU: refresh it from the
	 * new txn_addr, then write the entry back with word 0 intact. */
	iosapic_rd_irt_entry(vip, &word0, &word1);
	iosapic_set_irt_data(vip, &scratch_w0, &word1);
	iosapic_wr_irt_entry(vip, word0, word1);
	spin_unlock_irqrestore(&iosapic_lock, flags);
}
|
||||
#endif
|
||||
|
||||
static struct hw_interrupt_type iosapic_interrupt_type = {
|
||||
.typename = "IO-SAPIC-level",
|
||||
.startup = iosapic_startup_irq,
|
||||
@ -708,7 +730,9 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
|
||||
.disable = iosapic_disable_irq,
|
||||
.ack = no_ack_irq,
|
||||
.end = iosapic_end_irq,
|
||||
// .set_affinity = iosapic_set_affinity_irq,
|
||||
#ifdef CONFIG_SMP
|
||||
.set_affinity = iosapic_set_affinity_irq,
|
||||
#endif
|
||||
};
|
||||
|
||||
int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
|
||||
|
@ -8,6 +8,7 @@
|
||||
#define _ASM_PARISC_IRQ_H
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
#define NO_IRQ (-1)
|
||||
@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
|
||||
extern int txn_claim_irq(int);
|
||||
extern unsigned int txn_alloc_data(unsigned int);
|
||||
extern unsigned long txn_alloc_addr(unsigned int);
|
||||
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
|
||||
|
||||
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
|
||||
|
||||
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
|
||||
extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
|
||||
|
||||
/* soft power switch support (power.c) */
|
||||
extern struct tasklet_struct power_tasklet;
|
||||
|
Loading…
Reference in New Issue
Block a user