2006-10-04 16:16:59 +07:00
|
|
|
#ifndef LINUX_MSI_H
|
|
|
|
#define LINUX_MSI_H
|
|
|
|
|
2011-10-07 01:08:18 +07:00
|
|
|
#include <linux/kobject.h>
|
2007-04-05 14:19:10 +07:00
|
|
|
#include <linux/list.h>
|
|
|
|
|
2006-10-04 16:16:59 +07:00
|
|
|
/*
 * A raw MSI message: the address/data pair a device writes to signal an
 * interrupt.  Cached per descriptor in msi_desc::msg.
 */
struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};
|
|
|
|
|
2014-10-27 09:44:36 +07:00
|
|
|
extern int pci_msi_ignore_mask;
|
2007-01-18 11:50:05 +07:00
|
|
|
/* Helper functions */
struct irq_data;
struct msi_desc;

/* Fetch the last-written MSI message cached in @entry (msi_desc::msg). */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
/* Same as above, but the descriptor is looked up by Linux irq number. */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
|
2014-11-09 22:10:33 +07:00
|
|
|
|
2006-10-04 16:16:59 +07:00
|
|
|
/*
 * Per-vector MSI descriptor.  One instance exists for each allocated
 * MSI/MSI-X vector of a PCI device; all descriptors of a device are
 * chained on a list (see dev_to_msi_list()/for_each_msi_entry()).
 */
struct msi_desc {
	struct {
		__u8	is_msix	: 1;	/* 1 = MSI-X entry, 0 = plain MSI */
		__u8	multiple: 3;	/* log2 num of messages allocated */
		__u8	multi_cap : 3;	/* log2 num of messages supported */
		__u8	maskbit	: 1;	/* mask-pending bit supported ? */
		__u8	is_64	: 1;	/* Address size: 0=32bit 1=64bit */
		__u16	entry_nr;	/* specific enabled entry */
		unsigned default_irq;	/* default pre-assigned irq */
	} msi_attrib;

	u32 masked;			/* mask bits */
	unsigned int irq;		/* first Linux irq number of the block */
	/*
	 * Number of messages the device will actually send, as requested by
	 * the driver.  May be smaller than the power-of-two advertised via
	 * multi_cap, so it cannot be derived from msi_attrib.multiple.
	 */
	unsigned int nvec_used;		/* number of messages */
	struct list_head list;		/* linkage on the device's MSI list */

	/* Location of the mask facility: MSI-X uses a mapped table entry,
	 * plain MSI a position in config space. */
	union {
		void __iomem *mask_base;
		u8	mask_pos;
	};
	struct pci_dev *dev;		/* owning PCI device */

	/* Last set MSI message */
	struct msi_msg msg;
};
|
|
|
|
|
2014-11-15 21:24:03 +07:00
|
|
|
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		(&(desc)->dev.dev)
#define dev_to_msi_list(dev)		(&to_pci_dev((dev))->msi_list)
/* First descriptor on a device's MSI list. */
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
/* Iterate over every MSI descriptor of a device. */
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
|
|
|
|
|
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
|
|
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
|
|
|
|
#define for_each_pci_msi_entry(desc, pdev) \
|
|
|
|
for_each_msi_entry((desc), &(pdev)->dev)
|
|
|
|
|
|
|
|
static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
|
|
|
|
{
|
|
|
|
return desc->dev;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_PCI_MSI */
|
|
|
|
|
2014-11-09 22:10:33 +07:00
|
|
|
/* Read/write the MSI message of a descriptor at the device. */
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
/* Write variant addressed by Linux irq number instead of descriptor. */
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);

/* Mask/unmask a vector at the descriptor level (MSI-X / MSI variants). */
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
/* irq_chip-level mask/unmask entry points operating on irq_data. */
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
|
|
|
|
|
2014-11-09 22:10:34 +07:00
|
|
|
/* Conversion helpers. Should be removed after merging */
/* Legacy name kept for the transition; forwards to __pci_write_msi_msg(). */
static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	__pci_write_msi_msg(entry, msg);
}
|
|
|
|
/* Legacy name kept for the transition; forwards to pci_write_msi_msg(). */
static inline void write_msi_msg(int irq, struct msi_msg *msg)
{
	pci_write_msi_msg(irq, msg);
}
|
2014-11-23 17:55:58 +07:00
|
|
|
/* Legacy name kept for the transition; forwards to pci_msi_mask_irq(). */
static inline void mask_msi_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
|
|
|
|
/* Legacy name kept for the transition; forwards to pci_msi_unmask_irq(). */
static inline void unmask_msi_irq(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
}
|
2014-11-09 22:10:33 +07:00
|
|
|
|
2006-10-04 16:16:59 +07:00
|
|
|
/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
 * architecture specific code if needed.
 */
|
2007-01-29 02:56:37 +07:00
|
|
|
/* Single-vector setup/teardown hooks (weak; arch code may override). */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
/* Whole-device variants: @nvec vectors of @type (MSI or MSI-X). */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

/* Generic implementations used when an arch does not override the hooks. */
void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
|
2006-10-04 16:16:59 +07:00
|
|
|
|
2014-11-12 07:45:45 +07:00
|
|
|
/*
 * An MSI controller: identification (owning module, device, OF node, list
 * linkage) plus the callbacks that implement vector setup and teardown for
 * the PCI devices behind it.
 */
struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	/* Program an MSI vector for @dev as described by @desc. */
	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	/* Release whatever setup_irq() allocated for Linux irq number @irq. */
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
|
|
|
|
|
2014-11-12 17:39:03 +07:00
|
|
|
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
|
2014-11-15 21:24:04 +07:00
|
|
|
|
2014-11-15 21:24:05 +07:00
|
|
|
#include <linux/irqhandler.h>
|
2014-11-15 21:24:04 +07:00
|
|
|
#include <asm/msi.h>
|
|
|
|
|
2014-11-12 17:39:03 +07:00
|
|
|
struct irq_domain;
|
|
|
|
struct irq_chip;
|
|
|
|
struct device_node;
|
|
|
|
struct msi_domain_info;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* struct msi_domain_ops - MSI interrupt domain callbacks
|
|
|
|
* @get_hwirq: Retrieve the resulting hw irq number
|
|
|
|
* @msi_init: Domain specific init function for MSI interrupts
|
|
|
|
* @msi_free: Domain specific function to free a MSI interrupts
|
2014-11-15 21:24:04 +07:00
|
|
|
* @msi_check: Callback for verification of the domain/info/dev data
|
|
|
|
* @msi_prepare: Prepare the allocation of the interrupts in the domain
|
|
|
|
 * @msi_finish:	Optional callback to finalize the allocation
|
|
|
|
* @set_desc: Set the msi descriptor for an interrupt
|
|
|
|
* @handle_error: Optional error handler if the allocation fails
|
|
|
|
*
|
|
|
|
* @get_hwirq, @msi_init and @msi_free are callbacks used by
|
|
|
|
* msi_create_irq_domain() and related interfaces
|
|
|
|
*
|
|
|
|
* @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
|
|
|
|
* are callbacks used by msi_irq_domain_alloc_irqs() and related
|
|
|
|
* interfaces which are based on msi_desc.
|
2014-11-12 17:39:03 +07:00
|
|
|
*/
|
|
|
|
/* Callback table for an MSI irq domain; semantics documented in the
 * kerneldoc comment above.  get_hwirq/msi_init/msi_free serve domain
 * creation; the rest serve msi_desc-based irq allocation. */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* struct msi_domain_info - MSI interrupt domain data
|
2014-11-15 21:24:05 +07:00
|
|
|
 * @flags:	Flags to describe features and capabilities
|
|
|
|
* @ops: The callback data structure
|
|
|
|
* @chip: Optional: associated interrupt chip
|
|
|
|
* @chip_data: Optional: associated interrupt chip data
|
|
|
|
* @handler: Optional: associated interrupt flow handler
|
|
|
|
* @handler_data: Optional: associated interrupt flow handler data
|
|
|
|
* @handler_name: Optional: associated interrupt flow handler name
|
|
|
|
* @data: Optional: domain specific data
|
2014-11-12 17:39:03 +07:00
|
|
|
*/
|
|
|
|
/* Configuration for an MSI irq domain; fields documented in the kerneldoc
 * comment above.  Fields marked Optional there may be left NULL. */
struct msi_domain_info {
	u32			flags;		/* MSI_FLAG_* (see enum below) */
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;		/* domain specific data */
};
|
|
|
|
|
2014-11-15 21:24:05 +07:00
|
|
|
/* Flags for msi_domain_info (stored in msi_domain_info::flags) */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Build identity map between hwirq and irq */
	MSI_FLAG_IDENTITY_MAP		= (1 << 2),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 3),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 4),
};
|
|
|
|
|
2014-11-12 17:39:03 +07:00
|
|
|
/* Generic irq_set_affinity implementation for MSI domains. */
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

/* Create an MSI irq domain described by @info, stacked on @parent. */
struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
/* Allocate/free @nvec interrupts for @dev in @domain (msi_desc based). */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
/* Retrieve the msi_domain_info attached to @domain. */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
|
|
|
|
|
|
|
|
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
|
|
|
|
|
2006-10-04 16:16:59 +07:00
|
|
|
#endif /* LINUX_MSI_H */
|