2006-06-29 16:24:40 +07:00
|
|
|
#ifndef _LINUX_IRQ_H
|
|
|
|
#define _LINUX_IRQ_H
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Please do not include this file in generic code. There is currently
|
|
|
|
* no requirement for any architecture to implement anything held
|
|
|
|
* within this file.
|
|
|
|
*
|
|
|
|
* Thanks. --rmk
|
|
|
|
*/
|
|
|
|
|
2005-12-21 08:27:50 +07:00
|
|
|
#include <linux/smp.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-29 16:24:40 +07:00
|
|
|
#ifndef CONFIG_S390
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <linux/cache.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/cpumask.h>
|
2009-03-29 17:59:50 +07:00
|
|
|
#include <linux/gfp.h>
|
2006-06-23 16:06:00 +07:00
|
|
|
#include <linux/irqreturn.h>
|
2008-10-16 23:20:58 +07:00
|
|
|
#include <linux/irqnr.h>
|
2007-03-01 11:13:26 +07:00
|
|
|
#include <linux/errno.h>
|
2009-03-29 17:59:50 +07:00
|
|
|
#include <linux/topology.h>
|
2009-03-24 00:28:15 +07:00
|
|
|
#include <linux/wait.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/ptrace.h>
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 20:55:46 +07:00
|
|
|
#include <asm/irq_regs.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-10-05 19:06:34 +07:00
|
|
|
/* Forward declaration: flow handlers receive the descriptor they service. */
struct irq_desc;

/*
 * Highlevel flow handler type: called with the IRQ number and the
 * interrupt descriptor; replaces __do_IRQ() when set on a descriptor.
 */
typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
|
2006-10-05 19:06:34 +07:00
|
|
|
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * IRQ line status.
 *
 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
 *
 * IRQ types
 */
#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */

/* Internal flags */
#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request irq */
#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */

#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
# define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var) 0
# define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
struct proc_dir_entry;
|
2007-01-29 02:52:03 +07:00
|
|
|
struct msi_desc;
|
2006-06-29 16:24:51 +07:00
|
|
|
|
2010-09-27 19:44:25 +07:00
|
|
|
/**
 * struct irq_data - per irq and irq chip data passed down to chip functions
 * @irq:		interrupt number
 * @node:		node index useful for balancing
 * @chip:		low level interrupt hardware access
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 * @irq_2_iommu:	iommu with this irq
 *
 * NOTE(review): the field order here must stay a 1:1 overlay of the
 * leading anonymous struct inside struct irq_desc until the direct
 * references are cleaned up and everything is switched to irq_data.
 */
struct irq_data {
	unsigned int		irq;
	unsigned int		node;
	struct irq_chip		*chip;
	void			*handler_data;
	void			*chip_data;
	struct msi_desc		*msi_desc;
#ifdef CONFIG_SMP
	cpumask_var_t		affinity;
#endif
#ifdef CONFIG_INTR_REMAP
	struct irq_2_iommu	*irq_2_iommu;
#endif
};
|
|
|
|
|
2006-06-29 16:24:45 +07:00
|
|
|
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		start up the interrupt (defaults to ->enable if NULL)
 * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
 * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @disable:		disable the interrupt
 * @ack:		start of a new interrupt
 * @mask:		mask an interrupt source
 * @mask_ack:		ack and mask an interrupt source
 * @unmask:		unmask an interrupt source
 * @eoi:		end of interrupt - chip level
 * @end:		end of interrupt - flow level
 * @set_affinity:	set the CPU affinity on SMP machines
 * @retrigger:		resend an IRQ to the CPU
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
 * @bus_lock:		function to lock access to slow bus (i2c) chips
 * @bus_sync_unlock:	function to sync and unlock slow bus (i2c) chips
 *
 * @release:		release function solely used by UML
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);
	void		(*eoi)(unsigned int irq);

	void		(*end)(unsigned int irq);
	int		(*set_affinity)(unsigned int irq,
					const struct cpumask *dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	void		(*bus_lock)(unsigned int irq);
	void		(*bus_sync_unlock)(unsigned int irq);

	/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
};
|
|
|
|
|
2008-12-06 09:58:31 +07:00
|
|
|
struct timer_rand_state;
|
|
|
|
struct irq_2_iommu;
|
2006-06-29 16:24:45 +07:00
|
|
|
/**
|
|
|
|
* struct irq_desc - interrupt descriptor
|
2010-09-27 19:44:25 +07:00
|
|
|
* @irq_data: per irq and chip data passed down to chip functions
|
2008-12-19 07:57:52 +07:00
|
|
|
* @timer_rand_state: pointer to timer rand state struct
|
|
|
|
* @kstat_irqs: irq stats per cpu
|
2006-06-29 16:24:51 +07:00
|
|
|
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
|
2006-06-29 16:24:45 +07:00
|
|
|
* @action: the irq action chain
|
|
|
|
* @status: status information
|
|
|
|
* @depth: disable-depth, for nested irq_disable() calls
|
2006-07-30 17:03:08 +07:00
|
|
|
* @wake_depth: enable depth, for multiple set_irq_wake() callers
|
2006-06-29 16:24:45 +07:00
|
|
|
* @irq_count: stats field to detect stalled irqs
|
2007-07-31 14:39:03 +07:00
|
|
|
* @last_unhandled: aging timer for unhandled count
|
2008-11-23 21:34:43 +07:00
|
|
|
* @irqs_unhandled: stats field for spurious unhandled interrupts
|
2006-06-29 16:24:45 +07:00
|
|
|
* @lock: locking for SMP
|
|
|
|
* @pending_mask: pending rebalanced interrupts
|
2009-03-24 00:28:15 +07:00
|
|
|
* @threads_active: number of irqaction threads currently running
|
|
|
|
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
|
2006-06-29 16:24:45 +07:00
|
|
|
* @dir: /proc/irq/ procfs entry
|
2006-10-17 14:10:03 +07:00
|
|
|
* @name: flow handler name for /proc/interrupts output
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2006-06-29 16:24:40 +07:00
|
|
|
struct irq_desc {
|
2010-09-27 19:44:25 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This union will go away, once we fixed the direct access to
|
|
|
|
* irq_desc all over the place. The direct fields are a 1:1
|
|
|
|
* overlay of irq_data.
|
|
|
|
*/
|
|
|
|
union {
|
|
|
|
struct irq_data irq_data;
|
|
|
|
struct {
|
|
|
|
unsigned int irq;
|
|
|
|
unsigned int node;
|
|
|
|
struct irq_chip *chip;
|
|
|
|
void *handler_data;
|
|
|
|
void *chip_data;
|
|
|
|
struct msi_desc *msi_desc;
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
cpumask_var_t affinity;
|
|
|
|
#endif
|
2009-01-08 06:03:13 +07:00
|
|
|
#ifdef CONFIG_INTR_REMAP
|
2010-09-27 19:44:25 +07:00
|
|
|
struct irq_2_iommu *irq_2_iommu;
|
2008-12-06 09:58:31 +07:00
|
|
|
#endif
|
2010-09-27 19:44:25 +07:00
|
|
|
};
|
|
|
|
};
|
|
|
|
struct timer_rand_state *timer_rand_state;
|
|
|
|
unsigned int *kstat_irqs;
|
2006-10-05 19:06:34 +07:00
|
|
|
irq_flow_handler_t handle_irq;
|
2006-06-29 16:24:41 +07:00
|
|
|
struct irqaction *action; /* IRQ action list */
|
|
|
|
unsigned int status; /* IRQ status */
|
2006-06-29 16:24:51 +07:00
|
|
|
|
2006-06-29 16:24:41 +07:00
|
|
|
unsigned int depth; /* nested irq disables */
|
2006-07-30 17:03:08 +07:00
|
|
|
unsigned int wake_depth; /* nested wake enables */
|
2006-06-29 16:24:41 +07:00
|
|
|
unsigned int irq_count; /* For detecting broken IRQs */
|
2007-07-16 13:40:55 +07:00
|
|
|
unsigned long last_unhandled; /* Aging timer for unhandled count */
|
2008-11-23 21:34:43 +07:00
|
|
|
unsigned int irqs_unhandled;
|
2009-11-17 22:46:45 +07:00
|
|
|
raw_spinlock_t lock;
|
2006-06-29 16:24:38 +07:00
|
|
|
#ifdef CONFIG_SMP
|
2010-05-01 04:44:50 +07:00
|
|
|
const struct cpumask *affinity_hint;
|
2008-08-20 10:50:23 +07:00
|
|
|
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
2009-01-11 12:58:08 +07:00
|
|
|
cpumask_var_t pending_mask;
|
|
|
|
#endif
|
[PATCH] x86/x86_64: deferred handling of writes to /proc/irqxx/smp_affinity
When handling writes to /proc/irq, current code is re-programming rte
entries directly. This is not recommended and could potentially cause
chipset's to lockup, or cause missing interrupts.
CONFIG_IRQ_BALANCE does this correctly, where it re-programs only when the
interrupt is pending. The same needs to be done for /proc/irq handling as well.
Otherwise user space irq balancers are really not doing the right thing.
- Changed pending_irq_balance_cpumask to pending_irq_migrate_cpumask for
lack of a generic name.
- added move_irq out of IRQ_BALANCE, and added this same to X86_64
- Added new proc handler for write, so we can do deferred write at irq
handling time.
- Display of /proc/irq/XX/smp_affinity used to display CPU_MASKALL, instead
it now shows only active cpu masks, or exactly what was set.
- Provided a common move_irq implementation, instead of duplicating
when using generic irq framework.
Tested on i386/x86_64 and ia64 with CONFIG_PCI_MSI turned on and off.
Tested UP builds as well.
MSI testing: tbd: I have cards, need to look for a x-over cable, although I
did test an earlier version of this patch. Will test in a couple days.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Zwane Mwaikambo <zwane@holomorphy.com>
Grudgingly-acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Coywolf Qi Hunt <coywolf@lovecn.org>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-07 05:16:15 +07:00
|
|
|
#endif
|
2009-03-24 00:28:15 +07:00
|
|
|
atomic_t threads_active;
|
|
|
|
wait_queue_head_t wait_for_threads;
|
2006-06-29 16:24:42 +07:00
|
|
|
#ifdef CONFIG_PROC_FS
|
2006-10-17 14:10:03 +07:00
|
|
|
struct proc_dir_entry *dir;
|
2006-06-29 16:24:42 +07:00
|
|
|
#endif
|
2006-10-17 14:10:03 +07:00
|
|
|
const char *name;
|
2007-05-08 14:29:13 +07:00
|
|
|
} ____cacheline_internodealigned_in_smp;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-12-06 09:58:31 +07:00
|
|
|
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
|
2009-04-28 08:00:38 +07:00
|
|
|
struct irq_desc *desc, int node);
|
2008-12-06 09:58:31 +07:00
|
|
|
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
|
2008-08-20 10:50:10 +07:00
|
|
|
|
2008-12-06 09:58:31 +07:00
|
|
|
#ifndef CONFIG_SPARSE_IRQ
|
2006-06-29 16:24:40 +07:00
|
|
|
extern struct irq_desc irq_desc[NR_IRQS];
|
2009-04-30 15:17:50 +07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_NUMA_IRQ_DESC
|
2009-04-28 08:00:38 +07:00
|
|
|
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
|
2009-04-30 15:17:50 +07:00
|
|
|
#else
|
|
|
|
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
|
|
|
|
{
|
|
|
|
return desc;
|
|
|
|
}
|
|
|
|
#endif
|
2008-12-06 09:58:31 +07:00
|
|
|
|
2009-04-28 08:00:38 +07:00
|
|
|
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
|
2008-12-06 09:58:31 +07:00
|
|
|
|
2006-06-29 16:24:40 +07:00
|
|
|
/*
|
|
|
|
* Pick up the arch-dependent methods:
|
|
|
|
*/
|
|
|
|
#include <asm/hw_irq.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-29 16:24:40 +07:00
|
|
|
extern int setup_irq(unsigned int irq, struct irqaction *new);
|
2009-03-12 19:05:51 +07:00
|
|
|
extern void remove_irq(unsigned int irq, struct irqaction *act);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_GENERIC_HARDIRQS
|
2006-06-29 16:24:40 +07:00
|
|
|
|
[PATCH] x86/x86_64: deferred handling of writes to /proc/irqxx/smp_affinity
When handling writes to /proc/irq, current code is re-programming rte
entries directly. This is not recommended and could potentially cause
chipset's to lockup, or cause missing interrupts.
CONFIG_IRQ_BALANCE does this correctly, where it re-programs only when the
interrupt is pending. The same needs to be done for /proc/irq handling as well.
Otherwise user space irq balancers are really not doing the right thing.
- Changed pending_irq_balance_cpumask to pending_irq_migrate_cpumask for
lack of a generic name.
- added move_irq out of IRQ_BALANCE, and added this same to X86_64
- Added new proc handler for write, so we can do deferred write at irq
handling time.
- Display of /proc/irq/XX/smp_affinity used to display CPU_MASKALL, instead
it now shows only active cpu masks, or exactly what was set.
- Provided a common move_irq implementation, instead of duplicating
when using generic irq framework.
Tested on i386/x86_64 and ia64 with CONFIG_PCI_MSI turned on and off.
Tested UP builds as well.
MSI testing: tbd: I have cards, need to look for a x-over cable, although I
did test an earlier version of this patch. Will test in a couple days.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Zwane Mwaikambo <zwane@holomorphy.com>
Grudgingly-acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Coywolf Qi Hunt <coywolf@lovecn.org>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-07 05:16:15 +07:00
|
|
|
#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_PENDING_IRQ

void move_native_irq(int irq);
void move_masked_irq(int irq);

#else /* CONFIG_GENERIC_PENDING_IRQ */

/* Deferred IRQ migration is not configured: all three are no-ops. */
static inline void move_irq(int irq) { }

static inline void move_native_irq(int irq) { }

static inline void move_masked_irq(int irq) { }

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

#define move_native_irq(x)
#define move_masked_irq(x)

#endif /* CONFIG_SMP */
|
[PATCH] x86/x86_64: deferred handling of writes to /proc/irqxx/smp_affinity
When handling writes to /proc/irq, current code is re-programming rte
entries directly. This is not recommended and could potentially cause
chipset's to lockup, or cause missing interrupts.
CONFIG_IRQ_BALANCE does this correctly, where it re-programs only when the
interrupt is pending. The same needs to be done for /proc/irq handling as well.
Otherwise user space irq balancers are really not doing the right thing.
- Changed pending_irq_balance_cpumask to pending_irq_migrate_cpumask for
lack of a generic name.
- added move_irq out of IRQ_BALANCE, and added this same to X86_64
- Added new proc handler for write, so we can do deferred write at irq
handling time.
- Display of /proc/irq/XX/smp_affinity used to display CPU_MASKALL, instead
it now shows only active cpu masks, or exactly what was set.
- Provided a common move_irq implementation, instead of duplicating
when using generic irq framework.
Tested on i386/x86_64 and ia64 with CONFIG_PCI_MSI turned on and off.
Tested UP builds as well.
MSI testing: tbd: I have cards, need to look for a x-over cable, although I
did test an earlier version of this patch. Will test in a couple days.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Zwane Mwaikambo <zwane@holomorphy.com>
Grudgingly-acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Coywolf Qi Hunt <coywolf@lovecn.org>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-07 05:16:15 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
extern int no_irq_affinity;
|
|
|
|
|
2007-02-16 16:27:24 +07:00
|
|
|
static inline int irq_balancing_disabled(unsigned int irq)
|
|
|
|
{
|
2008-08-20 10:50:05 +07:00
|
|
|
struct irq_desc *desc;
|
|
|
|
|
|
|
|
desc = irq_to_desc(irq);
|
|
|
|
return desc->status & IRQ_NO_BALANCING_MASK;
|
2007-02-16 16:27:24 +07:00
|
|
|
}
|
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
/* Handle irq action chains: */
|
2008-10-01 04:14:27 +07:00
|
|
|
extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
|
2006-06-29 16:24:51 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Built-in IRQ handlers for various IRQ types,
|
2009-11-16 00:57:24 +07:00
|
|
|
* callable via desc->handle_irq()
|
2006-06-29 16:24:51 +07:00
|
|
|
*/
|
2008-02-08 19:19:55 +07:00
|
|
|
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
|
|
|
|
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
|
|
|
|
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
|
|
|
|
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
|
|
|
|
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
|
|
|
|
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
|
2009-08-25 02:28:04 +07:00
|
|
|
extern void handle_nested_irq(unsigned int irq);
|
2006-06-29 16:24:51 +07:00
|
|
|
|
2006-06-29 16:24:39 +07:00
|
|
|
/*
|
2006-06-29 16:24:51 +07:00
|
|
|
* Monolithic do_IRQ implementation.
|
2006-06-29 16:24:39 +07:00
|
|
|
*/
|
2006-09-26 13:32:07 +07:00
|
|
|
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
|
2008-02-08 19:19:55 +07:00
|
|
|
extern unsigned int __do_IRQ(unsigned int irq);
|
2006-09-26 13:32:07 +07:00
|
|
|
#endif
|
2006-06-29 16:24:39 +07:00
|
|
|
|
2006-06-29 16:24:52 +07:00
|
|
|
/*
|
|
|
|
* Architectures call this to let the generic IRQ layer
|
|
|
|
* handle an interrupt. If the descriptor is attached to an
|
|
|
|
* irqchip-style controller then we call the ->handle_irq() handler,
|
|
|
|
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
|
|
|
|
*/
|
2008-08-20 10:50:15 +07:00
|
|
|
/*
 * generic_handle_irq_desc - run the flow handler for an irq/descriptor pair.
 * Entry point used by architectures once they have resolved the descriptor.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	/* __do_IRQ() is compiled out: every irq has an irqchip flow handler. */
	desc->handle_irq(irq, desc);
#else
	/* Prefer the irqchip-style handler; fall back to legacy __do_IRQ(). */
	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc);
	else
		__do_IRQ(irq);
#endif
}
|
|
|
|
|
2008-08-20 10:50:15 +07:00
|
|
|
/* Convenience wrapper: look up the descriptor for @irq and run its handler. */
static inline void generic_handle_irq(unsigned int irq)
{
	generic_handle_irq_desc(irq, irq_to_desc(irq));
}
|
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
/* Handling of unhandled and spurious interrupts: */
|
2006-06-29 16:24:40 +07:00
|
|
|
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
|
2008-10-01 04:14:27 +07:00
|
|
|
irqreturn_t action_ret);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-29 16:24:48 +07:00
|
|
|
/* Resending of interrupts :*/
|
|
|
|
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
|
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
/* Enable/disable irq debugging output: */
|
|
|
|
extern int noirqdebug_setup(char *str);
|
|
|
|
|
|
|
|
/* Checks whether the interrupt can be requested by request_irq(): */
|
|
|
|
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
|
|
|
|
|
2006-07-02 04:30:08 +07:00
|
|
|
/* Dummy irq-chip implementations: */
|
2006-06-29 16:24:51 +07:00
|
|
|
extern struct irq_chip no_irq_chip;
|
2006-07-02 04:30:08 +07:00
|
|
|
extern struct irq_chip dummy_irq_chip;
|
2006-06-29 16:24:51 +07:00
|
|
|
|
2006-10-20 13:28:28 +07:00
|
|
|
extern void
|
|
|
|
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
|
|
|
|
irq_flow_handler_t handle);
|
2006-06-29 16:24:51 +07:00
|
|
|
extern void
|
2006-10-17 14:10:03 +07:00
|
|
|
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
|
|
|
|
irq_flow_handler_t handle, const char *name);
|
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
extern void
|
2006-10-17 14:10:03 +07:00
|
|
|
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
|
|
|
|
const char *name);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-12-19 00:05:58 +07:00
|
|
|
/* caller has locked the irq_desc and both params are valid */
|
|
|
|
static inline void __set_irq_handler_unlocked(int irq,
|
|
|
|
irq_flow_handler_t handler)
|
|
|
|
{
|
2008-08-20 10:50:05 +07:00
|
|
|
struct irq_desc *desc;
|
|
|
|
|
|
|
|
desc = irq_to_desc(irq);
|
|
|
|
desc->handle_irq = handler;
|
2007-12-19 00:05:58 +07:00
|
|
|
}
|
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
/*
|
|
|
|
* Set a highlevel flow handler for a given IRQ:
|
|
|
|
*/
|
|
|
|
/* Install a highlevel flow handler for @irq (non-chained, default name). */
static inline void
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
{
	/* is_chained = 0: handler is for a regular, requestable irq. */
	__set_irq_handler(irq, handle, 0, NULL);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set a highlevel chained flow handler for a given IRQ.
|
|
|
|
* (a chained handler is automatically enabled and set to
|
|
|
|
* IRQ_NOREQUEST and IRQ_NOPROBE)
|
|
|
|
*/
|
|
|
|
/*
 * Install a chained flow handler for @irq. Chained irqs are enabled
 * automatically and marked IRQ_NOREQUEST/IRQ_NOPROBE (see comment above).
 */
static inline void
set_irq_chained_handler(unsigned int irq,
			irq_flow_handler_t handle)
{
	/* is_chained = 1: demux handler, not available to request_irq(). */
	__set_irq_handler(irq, handle, 1, NULL);
}
|
|
|
|
|
2009-08-13 18:21:38 +07:00
|
|
|
extern void set_irq_nested_thread(unsigned int irq, int nest);
|
|
|
|
|
2008-02-08 19:22:01 +07:00
|
|
|
extern void set_irq_noprobe(unsigned int irq);
|
|
|
|
extern void set_irq_probe(unsigned int irq);
|
|
|
|
|
2006-10-04 16:16:37 +07:00
|
|
|
/* Handle dynamic irq creation and destruction */
|
2009-04-28 08:02:23 +07:00
|
|
|
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
|
2006-10-04 16:16:37 +07:00
|
|
|
extern int create_irq(void);
|
|
|
|
extern void destroy_irq(unsigned int irq);
|
|
|
|
|
2006-10-04 16:16:56 +07:00
|
|
|
/* Test to see if a driver has successfully requested an irq */
|
|
|
|
static inline int irq_has_action(unsigned int irq)
|
|
|
|
{
|
2008-08-20 10:50:05 +07:00
|
|
|
struct irq_desc *desc = irq_to_desc(irq);
|
2006-10-04 16:16:56 +07:00
|
|
|
return desc->action != NULL;
|
|
|
|
}
|
|
|
|
|
2006-10-04 16:16:37 +07:00
|
|
|
/* Dynamic irq helper functions */
|
|
|
|
extern void dynamic_irq_init(unsigned int irq);
|
2010-02-10 16:20:06 +07:00
|
|
|
void dynamic_irq_init_keep_chip_data(unsigned int irq);
|
2006-10-04 16:16:37 +07:00
|
|
|
extern void dynamic_irq_cleanup(unsigned int irq);
|
2010-02-10 16:20:06 +07:00
|
|
|
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
|
2006-06-29 16:24:53 +07:00
|
|
|
|
2006-10-04 16:16:37 +07:00
|
|
|
/* Set/get chip/data for an IRQ: */
|
2006-06-29 16:24:53 +07:00
|
|
|
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
|
|
|
|
extern int set_irq_data(unsigned int irq, void *data);
|
|
|
|
extern int set_irq_chip_data(unsigned int irq, void *data);
|
|
|
|
extern int set_irq_type(unsigned int irq, unsigned int type);
|
2007-01-29 02:52:03 +07:00
|
|
|
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
|
2006-06-29 16:24:53 +07:00
|
|
|
|
2010-09-27 19:44:25 +07:00
|
|
|
#define get_irq_chip(irq) (irq_to_desc(irq)->irq_data.chip)
|
|
|
|
#define get_irq_chip_data(irq) (irq_to_desc(irq)->irq_data.chip_data)
|
|
|
|
#define get_irq_data(irq) (irq_to_desc(irq)->irq_data.handler_data)
|
|
|
|
#define get_irq_msi(irq) (irq_to_desc(irq)->irq_data.msi_desc)
|
2006-06-29 16:24:53 +07:00
|
|
|
|
2010-09-27 19:44:25 +07:00
|
|
|
#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
|
|
|
|
#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
|
|
|
|
#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
|
|
|
|
#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
|
2008-12-06 09:58:31 +07:00
|
|
|
|
2006-06-29 16:24:51 +07:00
|
|
|
#endif /* CONFIG_GENERIC_HARDIRQS */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-29 16:24:40 +07:00
|
|
|
#endif /* !CONFIG_S390 */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2009-01-11 12:58:08 +07:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc: pointer to irq_desc struct
 * @node: node which will be handling the cpumasks
 * @boot: true when called before the slab allocator is fully up
 *	  (allocation then uses GFP_NOWAIT instead of GFP_ATOMIC)
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
							bool boot)
{
	gfp_t gfp = GFP_ATOMIC;

	/* Early boot: must not rely on the normal atomic allocation path. */
	if (boot)
		gfp = GFP_NOWAIT;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/* Off-stack cpumasks need real allocations; on-stack ones are embedded. */
	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		/* Roll back the affinity allocation so nothing leaks. */
		free_cpumask_var(desc->irq_data.affinity);
		return false;
	}
#endif
#endif
	return true;
}
|
|
|
|
|
2009-04-28 07:57:18 +07:00
|
|
|
/* Reset descriptor masks: affinity = all cpus, no pending migration. */
static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}
|
|
|
|
|
2009-01-11 12:58:08 +07:00
|
|
|
/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc: pointer to old irq_desc struct
 * @new_desc: pointer to new irq_desc struct
 *
 * Ensures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}
|
|
|
|
|
2009-03-31 10:37:20 +07:00
|
|
|
/*
 * free_desc_masks - release the off-stack cpumasks held by @old_desc.
 * @new_desc is not used here; presumably kept for signature symmetry
 * with init_copy_desc_masks() — TODO confirm with callers.
 */
static inline void free_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}
|
|
|
|
|
2009-01-11 12:58:08 +07:00
|
|
|
#else /* !CONFIG_SMP */
|
|
|
|
|
2009-04-28 08:00:38 +07:00
|
|
|
/* UP: cpumasks are embedded in irq_desc; nothing to allocate. */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
							bool boot)
{
	return true;
}
|
|
|
|
|
2009-04-28 07:57:18 +07:00
|
|
|
/* UP: no per-cpu affinity tracking, so initialization is a no-op. */
static inline void init_desc_masks(struct irq_desc *desc)
{
}
|
|
|
|
|
2009-01-11 12:58:08 +07:00
|
|
|
/* UP: no off-stack masks to copy; no-op stub matching the SMP API. */
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}
|
|
|
|
|
2009-03-31 10:37:20 +07:00
|
|
|
/* UP: nothing was allocated, so nothing to free; no-op stub. */
static inline void free_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}
|
2009-01-11 12:58:08 +07:00
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
2006-06-29 16:24:40 +07:00
|
|
|
#endif /* _LINUX_IRQ_H */
|