2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* File: msi.c
|
|
|
|
* Purpose: PCI Message Signaled Interrupt (MSI)
|
|
|
|
*
|
|
|
|
* Copyright (C) 2003-2004 Intel
|
|
|
|
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/smp_lock.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/proc_fs.h>
|
|
|
|
|
|
|
|
#include <asm/errno.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/smp.h>
|
|
|
|
|
|
|
|
#include "pci.h"
|
|
|
|
#include "msi.h"
|
|
|
|
|
|
|
|
static DEFINE_SPINLOCK(msi_lock);
|
|
|
|
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
|
|
|
|
static kmem_cache_t* msi_cachep;
|
|
|
|
|
|
|
|
static int pci_msi_enable = 1;
|
2005-06-07 13:07:46 +07:00
|
|
|
static int last_alloc_vector;
|
|
|
|
static int nr_released_vectors;
|
2005-04-17 05:20:36 +07:00
|
|
|
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
|
2005-06-07 13:07:46 +07:00
|
|
|
static int nr_msix_devices;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#ifndef CONFIG_X86_IO_APIC
|
|
|
|
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
|
|
|
|
#endif
|
|
|
|
|
2006-04-11 09:17:48 +07:00
|
|
|
static struct msi_ops *msi_ops;
|
|
|
|
|
|
|
|
/*
 * msi_register - install the architecture-specific MSI operations table.
 * @ops: arch callbacks (used later e.g. by set_msi_affinity() via
 *       msi_ops->target and by msi_init(), which refuses to run without it)
 *
 * Always returns 0.
 */
int
msi_register(struct msi_ops *ops)
{
	msi_ops = ops;
	return 0;
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
static int msi_cache_init(void)
|
|
|
|
{
|
2006-09-27 15:51:03 +07:00
|
|
|
msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
|
|
|
|
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (!msi_cachep)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * msi_set_mask_bit - set or clear the per-vector mask for an MSI/MSI-X vector.
 * @vector: vector whose msi_desc[] entry is consulted
 * @flag:   1 to mask the interrupt, 0 to unmask
 *
 * For MSI the mask bit lives in PCI config space (entry->mask_base is
 * re-used to hold the config-space register offset); for MSI-X it lives in
 * the memory-mapped Vector Control word of the table entry.  Does nothing
 * if the vector has no descriptor, no device, or no mask capability
 * (mask_base unset).
 */
static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;
		u32 mask_bits;

		/* mask_base doubles as the config-space offset of the
		 * MSI mask register (see the MSI setup path). */
		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		/* Only bit 0 is managed: single-message MSI. */
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		/* Write the Vector Control dword of this MSI-X table entry. */
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}
|
|
|
|
|
2006-10-04 16:16:33 +07:00
|
|
|
static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-10-04 16:16:33 +07:00
|
|
|
switch(entry->msi_attrib.type) {
|
|
|
|
case PCI_CAP_ID_MSI:
|
|
|
|
{
|
|
|
|
struct pci_dev *dev = entry->dev;
|
|
|
|
int pos = entry->msi_attrib.pos;
|
|
|
|
u16 data;
|
|
|
|
|
|
|
|
pci_read_config_dword(dev, msi_lower_address_reg(pos),
|
|
|
|
&msg->address_lo);
|
|
|
|
if (entry->msi_attrib.is_64) {
|
|
|
|
pci_read_config_dword(dev, msi_upper_address_reg(pos),
|
|
|
|
&msg->address_hi);
|
|
|
|
pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
|
|
|
|
} else {
|
|
|
|
msg->address_hi = 0;
|
|
|
|
pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
|
|
|
|
}
|
|
|
|
msg->data = data;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case PCI_CAP_ID_MSIX:
|
|
|
|
{
|
|
|
|
void __iomem *base;
|
|
|
|
base = entry->mask_base +
|
|
|
|
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
|
|
|
|
|
|
|
|
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
|
|
|
|
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
|
|
|
|
msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-10-04 16:16:33 +07:00
|
|
|
static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
|
|
|
|
{
|
2005-04-17 05:20:36 +07:00
|
|
|
switch (entry->msi_attrib.type) {
|
|
|
|
case PCI_CAP_ID_MSI:
|
|
|
|
{
|
2006-10-04 16:16:33 +07:00
|
|
|
struct pci_dev *dev = entry->dev;
|
|
|
|
int pos = entry->msi_attrib.pos;
|
|
|
|
|
|
|
|
pci_write_config_dword(dev, msi_lower_address_reg(pos),
|
|
|
|
msg->address_lo);
|
|
|
|
if (entry->msi_attrib.is_64) {
|
|
|
|
pci_write_config_dword(dev, msi_upper_address_reg(pos),
|
|
|
|
msg->address_hi);
|
|
|
|
pci_write_config_word(dev, msi_data_reg(pos, 1),
|
|
|
|
msg->data);
|
|
|
|
} else {
|
|
|
|
pci_write_config_word(dev, msi_data_reg(pos, 0),
|
|
|
|
msg->data);
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case PCI_CAP_ID_MSIX:
|
|
|
|
{
|
2006-10-04 16:16:33 +07:00
|
|
|
void __iomem *base;
|
|
|
|
base = entry->mask_base +
|
|
|
|
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
|
|
|
|
|
|
|
|
writel(msg->address_lo,
|
|
|
|
base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
|
|
|
|
writel(msg->address_hi,
|
|
|
|
base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
|
|
|
|
writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
2006-10-04 16:16:33 +07:00
|
|
|
BUG();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
2006-10-04 16:16:33 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
/*
 * set_msi_affinity - retarget an MSI vector at a new CPU.
 * @vector:   vector to retarget
 * @cpu_mask: requested CPU set; only the first CPU in the mask is used
 *
 * Reads the current message, lets the arch msi_ops rewrite the
 * address pair for the destination CPU, writes the message back, and
 * records the new affinity for /proc/irq.
 */
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msi_msg msg;
	unsigned int irq = vector;	/* MSI vectors map 1:1 to irq numbers */
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	read_msi_msg(entry, &msg);
	msi_ops->target(vector, dest_cpu, &msg.address_hi, &msg.address_lo);
	write_msi_msg(entry, &msg);
	set_native_irq_info(irq, cpu_mask);
}
|
2006-01-04 09:51:46 +07:00
|
|
|
#else
|
|
|
|
#define set_msi_affinity NULL
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
|
|
|
/* Mask the vector (sets the per-vector mask bit, if the device has one). */
static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}
|
|
|
|
|
|
|
|
/* Unmask the vector (clears the per-vector mask bit, if the device has one). */
static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}
|
|
|
|
|
|
|
|
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
|
|
|
|
{
|
|
|
|
struct msi_desc *entry;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&msi_lock, flags);
|
|
|
|
entry = msi_desc[vector];
|
|
|
|
if (!entry || !entry->dev) {
|
|
|
|
spin_unlock_irqrestore(&msi_lock, flags);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
entry->msi_attrib.state = 1; /* Mark it active */
|
|
|
|
spin_unlock_irqrestore(&msi_lock, flags);
|
|
|
|
|
|
|
|
return 0; /* never anything pending */
|
|
|
|
}
|
|
|
|
|
2005-06-07 13:07:46 +07:00
|
|
|
/*
 * Startup handler for MSI vectors with a mask bit: mark active, then
 * unmask so the device may raise the interrupt.
 */
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}
|
|
|
|
|
2005-06-07 13:07:46 +07:00
|
|
|
/*
 * Shutdown handler shared by all MSI/MSI-X types: mark the descriptor
 * inactive under msi_lock.  Tolerates a missing or device-less entry.
 */
static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}
|
|
|
|
|
2005-06-07 13:07:46 +07:00
|
|
|
/*
 * End-of-interrupt for maskless MSI: perform any deferred irq migration
 * (affinity writes queued via /proc are applied here, while the irq is
 * quiescent), then EOI at the local APIC.
 */
static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
 * End-of-interrupt for maskable MSI: apply any deferred migration,
 * unmask the vector (it was masked in .ack), then EOI at the local APIC.
 */
static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}
|
|
|
|
|
2005-06-07 13:07:46 +07:00
|
|
|
/* No-op callback for hw_interrupt_type slots that maskless MSI can't implement. */
static void do_nothing(unsigned int vector)
{
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
|
|
|
|
* which implement the MSI-X Capability Structure.
|
|
|
|
*/
|
|
|
|
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,		/* mask while the irq is handled */
	.end		= end_msi_irq_w_maskbit,	/* unmask + EOI */
	.set_affinity	= set_msi_affinity
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
|
|
|
|
* which implement the MSI Capability Structure with
|
|
|
|
* Mask-and-Pending Bits.
|
|
|
|
*/
|
|
|
|
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,		/* mask while the irq is handled */
	.end		= end_msi_irq_w_maskbit,	/* unmask + EOI */
	.set_affinity	= set_msi_affinity
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
|
|
|
|
* which implement the MSI Capability Structure without
|
|
|
|
* Mask-and-Pending Bits.
|
|
|
|
*/
|
|
|
|
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,	/* no mask bit: nothing to toggle */
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,	/* EOI only */
	.set_affinity	= set_msi_affinity
};
|
|
|
|
|
|
|
|
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
|
|
|
|
/*
 * assign_msi_vector - allocate a vector for a new MSI/MSI-X entry.
 *
 * While fresh vectors remain, delegates to assign_irq_vector();  once
 * LAST_DEVICE_VECTOR has been handed out (new_vector_avail == 0),
 * recycles vectors released by hot-unplug (vector_irq[] == 0).  A
 * recycled vector that still carries a stale msi_desc chain has all
 * linked vectors freed before it is returned.
 *
 * Returns the vector number, or -EBUSY when nothing can be recycled.
 */
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that successful allocation of MSI
	 * vector is assigned unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI have no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			/* prefer a released vector with no stale descriptor */
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;	/* claim it for MSI again */
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;	/* switch to recycling from now on */

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}
|
|
|
|
|
|
|
|
/*
 * Allocate an MSI vector and, on success, install its interrupt gate.
 * Returns the vector number or a negative error from assign_msi_vector().
 */
static int get_new_vector(void)
{
	int vector = assign_msi_vector();

	if (vector > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}
|
|
|
|
|
|
|
|
/*
 * msi_init - one-time global MSI initialization.
 *
 * Checks quirks, runs arch init, verifies msi_ops registration, creates
 * the msi_desc cache, and pre-reserves one vector for hotplug use.
 * The result is cached in a static: a prior success (status == 0) is
 * returned immediately on later calls.  Any failure clears
 * pci_msi_enable so MSI stays disabled system-wide.
 */
static int msi_init(void)
{
	static int status = -ENOMEM;	/* caches success across calls */

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed.  MSI disabled.\n");
		return status;
	}

	if (! msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	/* Mark the freshly allocated vector as released so the recycling
	 * path in assign_msi_vector() has at least one vector to hand out. */
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}
|
|
|
|
|
|
|
|
/* Allocate a vector for @dev's MSI capability (@dev itself is unused here). */
static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}
|
|
|
|
|
|
|
|
static struct msi_desc* alloc_msi_entry(void)
|
|
|
|
{
|
|
|
|
struct msi_desc *entry;
|
|
|
|
|
2006-09-27 15:51:03 +07:00
|
|
|
entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (!entry)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
entry->link.tail = entry->link.head = 0; /* single message */
|
|
|
|
entry->dev = NULL;
|
|
|
|
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Publish @entry as the descriptor for @vector, under msi_lock. */
static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}
|
|
|
|
|
|
|
|
static void irq_handler_init(int cap_id, int pos, int mask)
|
|
|
|
{
|
2006-01-26 07:42:11 +07:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&irq_desc[pos].lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (cap_id == PCI_CAP_ID_MSIX)
|
[PATCH] genirq: rename desc->handler to desc->chip
This patch-queue improves the generic IRQ layer to be truly generic, by adding
various abstractions and features to it, without impacting existing
functionality.
While the queue can be best described as "fix and improve everything in the
generic IRQ layer that we could think of", and thus it consists of many
smaller features and lots of cleanups, the one feature that stands out most is
the new 'irq chip' abstraction.
The irq-chip abstraction is about describing and coding and IRQ controller
driver by mapping its raw hardware capabilities [and quirks, if needed] in a
straightforward way, without having to think about "IRQ flow"
(level/edge/etc.) type of details.
This stands in contrast with the current 'irq-type' model of genirq
architectures, which 'mixes' raw hardware capabilities with 'flow' details.
The patchset supports both types of irq controller designs at once, and
converts i386 and x86_64 to the new irq-chip design.
As a bonus side-effect of the irq-chip approach, chained interrupt controllers
(master/slave PIC constructs, etc.) are now supported by design as well.
The end result of this patchset intends to be simpler architecture-level code
and more consolidation between architectures.
We reused many bits of code and many concepts from Russell King's ARM IRQ
layer, the merging of which was one of the motivations for this patchset.
This patch:
rename desc->handler to desc->chip.
Originally i did not want to do this, because it's a big patch. But having
both "desc->handler", "desc->handle_irq" and "action->handler" caused a
large degree of confusion and made the code appear alot less clean than it
truly is.
I have also attempted a dual approach as well by introducing a
desc->chip alias - but that just wasnt robust enough and broke
frequently.
So lets get over with this quickly. The conversion was done automatically
via scripts and converts all the code in the kernel.
This renaming patch is the first one amongst the patches, so that the
remaining patches can stay flexible and can be merged and split up
without having some big monolithic patch act as a merge barrier.
[akpm@osdl.org: build fix]
[akpm@osdl.org: another build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-29 16:24:36 +07:00
|
|
|
irq_desc[pos].chip = &msix_irq_type;
|
2005-04-17 05:20:36 +07:00
|
|
|
else {
|
|
|
|
if (!mask)
|
[PATCH] genirq: rename desc->handler to desc->chip
This patch-queue improves the generic IRQ layer to be truly generic, by adding
various abstractions and features to it, without impacting existing
functionality.
While the queue can be best described as "fix and improve everything in the
generic IRQ layer that we could think of", and thus it consists of many
smaller features and lots of cleanups, the one feature that stands out most is
the new 'irq chip' abstraction.
The irq-chip abstraction is about describing and coding and IRQ controller
driver by mapping its raw hardware capabilities [and quirks, if needed] in a
straightforward way, without having to think about "IRQ flow"
(level/edge/etc.) type of details.
This stands in contrast with the current 'irq-type' model of genirq
architectures, which 'mixes' raw hardware capabilities with 'flow' details.
The patchset supports both types of irq controller designs at once, and
converts i386 and x86_64 to the new irq-chip design.
As a bonus side-effect of the irq-chip approach, chained interrupt controllers
(master/slave PIC constructs, etc.) are now supported by design as well.
The end result of this patchset intends to be simpler architecture-level code
and more consolidation between architectures.
We reused many bits of code and many concepts from Russell King's ARM IRQ
layer, the merging of which was one of the motivations for this patchset.
This patch:
rename desc->handler to desc->chip.
Originally i did not want to do this, because it's a big patch. But having
both "desc->handler", "desc->handle_irq" and "action->handler" caused a
large degree of confusion and made the code appear alot less clean than it
truly is.
I have also attempted a dual approach as well by introducing a
desc->chip alias - but that just wasnt robust enough and broke
frequently.
So lets get over with this quickly. The conversion was done automatically
via scripts and converts all the code in the kernel.
This renaming patch is the first one amongst the patches, so that the
remaining patches can stay flexible and can be merged and split up
without having some big monolithic patch act as a merge barrier.
[akpm@osdl.org: build fix]
[akpm@osdl.org: another build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-29 16:24:36 +07:00
|
|
|
irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
|
2005-04-17 05:20:36 +07:00
|
|
|
else
|
[PATCH] genirq: rename desc->handler to desc->chip
This patch-queue improves the generic IRQ layer to be truly generic, by adding
various abstractions and features to it, without impacting existing
functionality.
While the queue can be best described as "fix and improve everything in the
generic IRQ layer that we could think of", and thus it consists of many
smaller features and lots of cleanups, the one feature that stands out most is
the new 'irq chip' abstraction.
The irq-chip abstraction is about describing and coding and IRQ controller
driver by mapping its raw hardware capabilities [and quirks, if needed] in a
straightforward way, without having to think about "IRQ flow"
(level/edge/etc.) type of details.
This stands in contrast with the current 'irq-type' model of genirq
architectures, which 'mixes' raw hardware capabilities with 'flow' details.
The patchset supports both types of irq controller designs at once, and
converts i386 and x86_64 to the new irq-chip design.
As a bonus side-effect of the irq-chip approach, chained interrupt controllers
(master/slave PIC constructs, etc.) are now supported by design as well.
The end result of this patchset intends to be simpler architecture-level code
and more consolidation between architectures.
We reused many bits of code and many concepts from Russell King's ARM IRQ
layer, the merging of which was one of the motivations for this patchset.
This patch:
rename desc->handler to desc->chip.
Originally i did not want to do this, because it's a big patch. But having
both "desc->handler", "desc->handle_irq" and "action->handler" caused a
large degree of confusion and made the code appear alot less clean than it
truly is.
I have also attempted a dual approach as well by introducing a
desc->chip alias - but that just wasnt robust enough and broke
frequently.
So lets get over with this quickly. The conversion was done automatically
via scripts and converts all the code in the kernel.
This renaming patch is the first one amongst the patches, so that the
remaining patches can stay flexible and can be merged and split up
without having some big monolithic patch act as a merge barrier.
[akpm@osdl.org: build fix]
[akpm@osdl.org: another build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-29 16:24:36 +07:00
|
|
|
irq_desc[pos].chip = &msi_irq_w_maskbit_type;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2006-01-26 07:42:11 +07:00
|
|
|
spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * enable_msi_mode - turn on MSI or MSI-X in the device's control register.
 * @dev:  the PCI device
 * @pos:  config-space offset of the capability
 * @type: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 *
 * Also disables legacy INTx on PCI Express endpoints, since MSI and INTx
 * must not both be active.
 */
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}
|
|
|
|
|
2005-08-17 05:15:58 +07:00
|
|
|
/*
 * disable_msi_mode - turn off MSI or MSI-X in the device's control register.
 * @dev:  the PCI device
 * @pos:  config-space offset of the capability
 * @type: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 *
 * Re-enables legacy INTx on PCI Express endpoints, reversing
 * enable_msi_mode().
 */
void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}
|
|
|
|
|
|
|
|
/*
 * msi_lookup_vector - find an already-assigned vector for @dev.
 * @dev:  the PCI device
 * @type: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 *
 * Scans msi_desc[] for an entry belonging to @dev with matching
 * capability type and default vector.  On a hit, overrides dev->irq
 * with the found vector and returns 0; otherwise returns -EACCES.
 */
static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI vector for this device
		   already exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}
|
|
|
|
|
|
|
|
/*
 * pci_scan_msi_device - account @dev in the global MSI/MSI-X device counts.
 *
 * Counts MSI-X-capable devices (for vector budgeting) and reserves a
 * vector slot for plain-MSI-capable devices.  NULL is tolerated.
 * A device with both capabilities is counted only as MSI-X.
 */
void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}
|
|
|
|
|
2006-02-08 16:11:38 +07:00
|
|
|
#ifdef CONFIG_PM
|
|
|
|
/*
 * pci_save_msi_state - snapshot the MSI capability before suspend.
 *
 * Saves up to five dwords (control+header, address lo/hi, data,
 * mask bits — the latter three depending on the 64BIT/MASKBIT flags)
 * into a pci_cap_saved_state hung off the device.  A device without
 * MSI, with MSI not enabled, or with no_msi set saves nothing (0).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;	/* control word is the upper half of dword 0 */
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * pci_restore_msi_state - reprogram the MSI capability after resume.
 *
 * Writes back the registers saved by pci_save_msi_state() in the same
 * order, re-enables MSI mode, and frees the saved-state record.
 * Silently returns if there is no saved state or no MSI capability.
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;	/* control word saved in dword 0 */
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}
|
|
|
|
|
|
|
|
/*
 * pci_save_msix_state - save MSI-X state ahead of a reset/suspend.
 *
 * Stores the 16-bit MSI-X control word in a pci_cap_saved_state entry
 * and captures each vector's message into its msi_desc (entry->msg_save)
 * by walking the device's circular vector list.
 *
 * Returns 0 on success or when there is nothing to save, -ENOMEM if the
 * save buffer cannot be allocated, -EINVAL if no MSI-X vectors are
 * assigned to the device.
 */
int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int vector, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;	/* MSI-X not enabled: nothing to save */
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;	/* preserve caller-visible irq across lookup */
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	/* Walk the device's circular vector list; the walk stops once a
	 * vector's tail link points back at the list head */
	vector = head = dev->irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = msi_desc[vector];
		read_msi_msg(entry, &entry->msg_save);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;	/* restore the pin-assertion irq */

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * pci_restore_msix_state - replay the MSI-X state captured by
 * pci_save_msix_state() and re-enable MSI-X on the device.
 */
void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	/* The saved payload is just the 16-bit MSI-X control word */
	save = *((u16 *)&save_state->data[0]);
	/* Saved state is single-use: consume it up front */
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;	/* preserve caller-visible irq across lookup */
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	/* NOTE(review): dev->irq is not restored on the early return above -
	 * verify msi_lookup_vector() leaves it untouched on failure */
	/* Walk the circular vector list and rewrite each table entry from
	 * the message captured in msg_save */
	vector = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[vector];
		write_msi_msg(entry, &entry->msg_save);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	/* Restore the control word, then turn MSI-X mode back on */
	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
|
|
|
|
#endif
|
|
|
|
|
2006-04-11 09:17:48 +07:00
|
|
|
/*
 * msi_register_init - program the MSI message for @entry and, on devices
 * that support per-vector masking, mask every message vector.
 *
 * Returns 0 on success, or the negative error from the arch msi_ops
 * setup hook.
 */
static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	struct msi_msg msg;
	int pos, vector = dev->irq;
	u16 control;

	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, vector, &msg.address_hi, &msg.address_lo, &msg.data);
	if (status < 0)
		return status;

	write_msi_msg(entry, &msg);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* temp is a power of two, so (temp - 1) & ~temp is just
		 * temp - 1: one mask bit set per possible message */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI vector, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI vector or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	vector = get_msi_vector(dev);
	if (vector < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	/* A single-vector device forms a one-element circular list */
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		/* For MSI (unlike MSI-X) mask_base carries the config-space
		 * offset of the mask register, not an ioremapped address */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		/* Undo the irq takeover and drop the unattached entry */
		dev->irq = entry->msi_attrib.default_vector;
		kmem_cache_free(msi_cachep, entry);
		return status;
	}

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* msix_capability_init - configure device's MSI-X capability
|
|
|
|
* @dev: pointer to the pci_dev data structure of MSI-X device function
|
2005-10-24 01:57:38 +07:00
|
|
|
* @entries: pointer to an array of struct msix_entry entries
|
|
|
|
* @nvec: number of @entries
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2005-05-04 07:38:30 +07:00
|
|
|
* Setup the MSI-X capability structure of device function with a
|
2005-04-17 05:20:36 +07:00
|
|
|
* single MSI-X vector. A return of zero indicates the successful setup of
|
|
|
|
* requested MSI-X entries with allocated vectors or non-zero for otherwise.
|
|
|
|
**/
|
|
|
|
static int msix_capability_init(struct pci_dev *dev,
|
|
|
|
struct msix_entry *entries, int nvec)
|
|
|
|
{
|
|
|
|
struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
|
2006-10-04 16:16:33 +07:00
|
|
|
struct msi_msg msg;
|
2006-04-11 09:17:48 +07:00
|
|
|
int status;
|
2005-04-17 05:20:36 +07:00
|
|
|
int vector, pos, i, j, nr_entries, temp = 0;
|
2006-02-17 14:58:29 +07:00
|
|
|
unsigned long phys_addr;
|
|
|
|
u32 table_offset;
|
2005-04-17 05:20:36 +07:00
|
|
|
u16 control;
|
|
|
|
u8 bir;
|
|
|
|
void __iomem *base;
|
|
|
|
|
|
|
|
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
|
|
|
|
/* Request & Map MSI-X table region */
|
|
|
|
pci_read_config_word(dev, msi_control_reg(pos), &control);
|
|
|
|
nr_entries = multi_msix_capable(control);
|
2006-02-17 14:58:29 +07:00
|
|
|
|
|
|
|
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
|
2005-04-17 05:20:36 +07:00
|
|
|
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
|
2006-02-17 14:58:29 +07:00
|
|
|
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
|
|
|
|
phys_addr = pci_resource_start (dev, bir) + table_offset;
|
2005-04-17 05:20:36 +07:00
|
|
|
base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
|
|
|
|
if (base == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* MSI-X Table Initialization */
|
|
|
|
for (i = 0; i < nvec; i++) {
|
|
|
|
entry = alloc_msi_entry();
|
|
|
|
if (!entry)
|
|
|
|
break;
|
2006-01-14 14:34:53 +07:00
|
|
|
vector = get_msi_vector(dev);
|
2006-04-17 09:02:54 +07:00
|
|
|
if (vector < 0) {
|
|
|
|
kmem_cache_free(msi_cachep, entry);
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
2006-04-17 09:02:54 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
j = entries[i].entry;
|
|
|
|
entries[i].vector = vector;
|
|
|
|
entry->msi_attrib.type = PCI_CAP_ID_MSIX;
|
|
|
|
entry->msi_attrib.state = 0; /* Mark it not active */
|
2006-10-04 16:16:33 +07:00
|
|
|
entry->msi_attrib.is_64 = 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
entry->msi_attrib.entry_nr = j;
|
|
|
|
entry->msi_attrib.maskbit = 1;
|
|
|
|
entry->msi_attrib.default_vector = dev->irq;
|
2006-10-04 16:16:33 +07:00
|
|
|
entry->msi_attrib.pos = pos;
|
2005-04-17 05:20:36 +07:00
|
|
|
entry->dev = dev;
|
|
|
|
entry->mask_base = base;
|
|
|
|
if (!head) {
|
|
|
|
entry->link.head = vector;
|
|
|
|
entry->link.tail = vector;
|
|
|
|
head = entry;
|
|
|
|
} else {
|
|
|
|
entry->link.head = temp;
|
|
|
|
entry->link.tail = tail->link.tail;
|
|
|
|
tail->link.tail = vector;
|
|
|
|
head->link.head = vector;
|
|
|
|
}
|
|
|
|
temp = vector;
|
|
|
|
tail = entry;
|
|
|
|
/* Replace with MSI-X handler */
|
|
|
|
irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
|
|
|
|
/* Configure MSI-X capability structure */
|
2006-04-11 09:17:48 +07:00
|
|
|
status = msi_ops->setup(dev, vector,
|
2006-10-04 16:16:33 +07:00
|
|
|
&msg.address_hi,
|
|
|
|
&msg.address_lo,
|
|
|
|
&msg.data);
|
2006-04-11 09:17:48 +07:00
|
|
|
if (status < 0)
|
|
|
|
break;
|
|
|
|
|
2006-10-04 16:16:33 +07:00
|
|
|
write_msi_msg(entry, &msg);
|
2005-04-17 05:20:36 +07:00
|
|
|
attach_msi_entry(entry, vector);
|
|
|
|
}
|
|
|
|
if (i != nvec) {
|
|
|
|
i--;
|
|
|
|
for (; i >= 0; i--) {
|
|
|
|
vector = (entries + i)->vector;
|
|
|
|
msi_free_vector(dev, vector, 0);
|
|
|
|
(entries + i)->vector = 0;
|
|
|
|
}
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
/* Set MSI-X enabled bits */
|
|
|
|
enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-08-31 12:55:07 +07:00
|
|
|
/**
|
|
|
|
* pci_msi_supported - check whether MSI may be enabled on device
|
|
|
|
* @dev: pointer to the pci_dev data structure of MSI device function
|
|
|
|
*
|
|
|
|
* MSI must be globally enabled and supported by the device and its root
|
|
|
|
* bus. But, the root bus is not easy to find since some architectures
|
|
|
|
* have virtual busses on top of the PCI hierarchy (for instance the
|
|
|
|
* hypertransport bus), while the actual bus where MSI must be supported
|
|
|
|
* is below. So we test the MSI flag on all parent busses and assume
|
|
|
|
* that no quirk will ever set the NO_MSI flag on a non-root bus.
|
|
|
|
**/
|
|
|
|
static
|
|
|
|
int pci_msi_supported(struct pci_dev * dev)
|
|
|
|
{
|
|
|
|
struct pci_bus *bus;
|
|
|
|
|
|
|
|
if (!pci_msi_enable || !dev || dev->no_msi)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* check MSI flags of all parent busses */
|
|
|
|
for (bus = dev->bus; bus; bus = bus->parent)
|
|
|
|
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI vector upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * vector or non-zero for otherwise.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	temp = dev->irq;	/* remember the pin-assertion IRQ */

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSI));

	/* Check whether driver already requested for MSI-X vectors */
	/* NOTE: pos is reused here - from this point on it refers to the
	 * MSI-X capability and is 0 for an MSI-only device */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
			"Device already has MSI-X vectors assigned\n",
			pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		/* Update the global vector accounting */
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}
|
|
|
|
|
|
|
|
/*
 * pci_disable_msi - revert a device from MSI to pin-assertion interrupts.
 *
 * Clears MSI mode, releases the vector (unless an irq handler is still
 * registered on it, which is a driver bug) and restores dev->irq to the
 * original pin-assertion vector.
 */
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;		/* MSI was never enabled */

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	/* msi_lock guards the global msi_desc[] table */
	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		/* An irq handler is still registered on the vector: refuse
		 * to free it and complain loudly */
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_free_vector(dev, dev->irq, 0);

		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
	}
}
|
|
|
|
|
|
|
|
/*
 * msi_free_vector - release one MSI/MSI-X vector.
 * @dev: device owning the vector
 * @vector: vector number to release
 * @reassign: non-zero when the vector is being handed to another user;
 *	it is then not returned to the released pool and the MSI-X table
 *	entry is left unmasked.
 *
 * Unlinks the vector's msi_desc from the device's circular list, frees
 * it, and - when freeing a device's last MSI-X entry - unmaps the table.
 * Returns 0 on success, -EINVAL if @vector does not belong to @dev.
 */
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	msi_ops->teardown(vector);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	/* Snapshot what is needed after the lock is dropped */
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	/* Unlink from the device's circular vector list */
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		/* Mask the released MSI-X table entry */
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		/* head == vector means this was the device's last MSI-X
		 * entry, so the table mapping can be dropped */
		if (head == vector)
			iounmap(base);
	}

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;	/* more vectors than the table holds */

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;	/* preserve caller-visible irq across lookup */
	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSIX));

	/* Check whether driver already requested for MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vectors resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default to avoid any MSI-X driver to take all available
	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		/* Tell the caller how many vectors it may retry with */
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;	/* this device consumed its share */

	return status;
}
|
|
|
|
|
|
|
|
/*
 * pci_disable_msix - revert a device from MSI-X to pin-assertion
 * interrupts, releasing every MSI-X vector whose handler has already
 * been freed.
 */
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;		/* MSI-X was never enabled */

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	temp = dev->irq;	/* pin IRQ, before the lookup rewrites it */
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		dev->irq = temp;			/* Restore pin IRQ */
		/* Walk the circular vector list, freeing each vector whose
		 * handler is gone; the head entry anchors the walk and is
		 * freed last, after the loop */
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;	/* handler still registered */
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}
|
|
|
|
|
|
|
|
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Device still owns an MSI vector */
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			/* A handler is still registered at hot-remove time -
			 * a driver bug */
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		/* Walk the device's circular MSI-X vector list */
		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;	/* handler still registered */
			else if (vector != head) /* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Some entries were skipped because handlers were
			 * still live, so the table mapping is dropped here
			 * rather than by msi_free_vector() */
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}
|
|
|
|
|
2006-03-06 12:33:34 +07:00
|
|
|
/* Globally disable MSI/MSI-X support; pci_msi_supported() and the
 * disable paths above all test pci_msi_enable, so every subsequent
 * enable request is refused. */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* Driver-facing MSI/MSI-X API exported to modules */
EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
|