2013-06-06 23:27:09 +07:00
|
|
|
/*
|
|
|
|
* Marvell Orion SoCs IRQ chip driver.
|
|
|
|
*
|
|
|
|
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
|
|
|
|
*
|
|
|
|
* This file is licensed under the terms of the GNU General Public
|
|
|
|
* License version 2. This program is licensed "as is" without any
|
|
|
|
* warranty of any kind, whether express or implied.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/irq.h>
|
2015-07-08 04:11:46 +07:00
|
|
|
#include <linux/irqchip.h>
|
2013-06-06 23:27:09 +07:00
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_address.h>
|
|
|
|
#include <linux/of_irq.h>
|
|
|
|
#include <asm/exception.h>
|
|
|
|
#include <asm/mach/irq.h>
|
|
|
|
|
|
|
|
/*
 * Orion SoC main interrupt controller
 */

/* Each "reg" tuple in the DT node describes one 32-input chip */
#define ORION_IRQS_PER_CHIP		32

/* Register offsets, relative to each per-chip reg base */
#define ORION_IRQ_CAUSE			0x00	/* pending sources (read in handler) */
#define ORION_IRQ_MASK			0x04	/* IRQ enable bits (1 = enabled) */
#define ORION_IRQ_FIQ_MASK		0x08	/* not referenced by this driver */
#define ORION_IRQ_ENDP_MASK		0x0c	/* not referenced by this driver */

/* Linear domain covering all main-controller chips; set up in orion_irq_init() */
static struct irq_domain *orion_irq_domain;
|
|
|
|
|
2014-03-05 07:40:30 +07:00
|
|
|
static void
|
2013-06-06 23:27:09 +07:00
|
|
|
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
|
|
|
|
int n, base = 0;
|
|
|
|
|
|
|
|
for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
|
|
|
|
struct irq_chip_generic *gc =
|
|
|
|
irq_get_domain_generic_chip(orion_irq_domain, base);
|
|
|
|
u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
|
|
|
|
gc->mask_cache;
|
|
|
|
while (stat) {
|
irqchip: orion: Reverse irq handling priority
Non-DT irq handlers were working through irq causes from most-significant
to least-significant bit, while DT irqchip driver does it the other way
round. This revealed some more HW issues on Kirkwood peripheral IP, where
spurious sdio irqs can happen although irqs are masked.
Also, the generated binaries show that original non-DT order compared
to DT order save two instructions for each bit count check:
irqchip DT order with ffs():
60: e3a06001 mov r6, #1
64: e2643000 rsb r3, r4, #0
68: e0033004 and r3, r3, r4
6c: e16f3f13 clz r3, r3
70: e263301f rsb r3, r3, #31
74: e1c44316 bic r4, r4, r6, lsl r3
78: e5971004 ldr r1, [r7, #4]
Original non-DT order with fls():
60: e3a07001 mov r7, #1
64: e16f3f14 clz r3, r4
68: e263301f rsb r3, r3, #31
6c: e1c44317 bic r4, r4, r7, lsl r3
70: e5951004 ldr r1, [r5, #4]
Therefore, reverse irq bit handling back to original order by replacing
ffs() with fls().
Signed-off-by: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Link: https://lkml.kernel.org/r/1398719528-23607-1-git-send-email-sebastian.hesselbarth@gmail.com
Acked-by: Jason Cooper <jason@lakedaemon.net>
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
2014-04-29 04:12:08 +07:00
|
|
|
u32 hwirq = __fls(stat);
|
2014-08-26 17:03:25 +07:00
|
|
|
handle_domain_irq(orion_irq_domain,
|
|
|
|
gc->irq_base + hwirq, regs);
|
2013-06-06 23:27:09 +07:00
|
|
|
stat &= ~(1 << hwirq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Probe the Orion main interrupt controller.
 *
 * One 32-input generic chip is created per valid "reg" tuple in the DT
 * node.  The main controller is essential for boot, so every failure
 * here panics rather than returning an error.
 *
 * @np:     the "marvell,orion-intc" device tree node
 * @parent: unused (top-level controller, no parent irq)
 *
 * Returns 0 on success (never returns on failure).
 */
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	/* New mappings start masked and must be explicitly requested */
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%pOFn: unable to add irq domain\n", np);

	/* One chip type per chip; mask cache seeded from hardware */
	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->full_name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%pOFn: unable to alloc irq domain gc\n", np);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		/* Cannot fail: same index range already probed above */
		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%pOFn: unable to request mem region %d",
			      np, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%pOFn: unable to map resource %d", np, n);

		/* Level irqs: only mask/unmask ops, no ack register */
		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	/* Install the architecture-level IRQ entry point */
	set_handle_irq(orion_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
|
|
|
|
|
|
|
|
/*
 * Orion SoC bridge interrupt controller
 */

/* Register offsets, relative to the bridge reg base */
#define ORION_BRIDGE_IRQ_CAUSE	0x00	/* pending sources; also the ack register */
#define ORION_BRIDGE_IRQ_MASK	0x04	/* IRQ enable bits (1 = enabled) */
|
|
|
|
|
2015-09-14 15:42:37 +07:00
|
|
|
static void orion_bridge_irq_handler(struct irq_desc *desc)
|
2013-06-06 23:27:09 +07:00
|
|
|
{
|
2015-06-04 11:13:20 +07:00
|
|
|
struct irq_domain *d = irq_desc_get_handler_data(desc);
|
2014-02-07 06:41:58 +07:00
|
|
|
|
|
|
|
struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
|
2013-06-06 23:27:09 +07:00
|
|
|
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
|
|
|
|
gc->mask_cache;
|
|
|
|
|
|
|
|
while (stat) {
|
irqchip: orion: Reverse irq handling priority
Non-DT irq handlers were working through irq causes from most-significant
to least-significant bit, while DT irqchip driver does it the other way
round. This revealed some more HW issues on Kirkwood peripheral IP, where
spurious sdio irqs can happen although irqs are masked.
Also, the generated binaries show that original non-DT order compared
to DT order save two instructions for each bit count check:
irqchip DT order with ffs():
60: e3a06001 mov r6, #1
64: e2643000 rsb r3, r4, #0
68: e0033004 and r3, r3, r4
6c: e16f3f13 clz r3, r3
70: e263301f rsb r3, r3, #31
74: e1c44316 bic r4, r4, r6, lsl r3
78: e5971004 ldr r1, [r7, #4]
Original non-DT order with fls():
60: e3a07001 mov r7, #1
64: e16f3f14 clz r3, r4
68: e263301f rsb r3, r3, #31
6c: e1c44317 bic r4, r4, r7, lsl r3
70: e5951004 ldr r1, [r5, #4]
Therefore, reverse irq bit handling back to original order by replacing
ffs() with fls().
Signed-off-by: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Link: https://lkml.kernel.org/r/1398719528-23607-1-git-send-email-sebastian.hesselbarth@gmail.com
Acked-by: Jason Cooper <jason@lakedaemon.net>
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
2014-04-29 04:12:08 +07:00
|
|
|
u32 hwirq = __fls(stat);
|
2013-06-06 23:27:09 +07:00
|
|
|
|
|
|
|
generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
|
|
|
|
stat &= ~(1 << hwirq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-24 06:10:32 +07:00
|
|
|
/*
|
|
|
|
* Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
|
|
|
|
* To avoid interrupt events on stale irqs, we clear them before unmask.
|
|
|
|
*/
|
|
|
|
static unsigned int orion_bridge_irq_startup(struct irq_data *d)
|
|
|
|
{
|
|
|
|
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
|
|
|
|
|
|
ct->chip.irq_ack(d);
|
|
|
|
ct->chip.irq_unmask(d);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/*
 * Probe an Orion bridge interrupt controller.
 *
 * A single edge-triggered generic chip (up to 32 inputs, optionally
 * reduced via the "marvell,#interrupts" property) that cascades into a
 * parent interrupt via a chained handler.  Unlike the main controller,
 * failures return an error code instead of panicking.
 *
 * @np:     the "marvell,orion-bridge-intc" device tree node
 * @parent: unused; the parent irq is taken from np's interrupts property
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the later failure paths the mem region, irq mapping
 * and domain allocated earlier are not released — presumably acceptable
 * since a failed boot-time irqchip probe is fatal anyway; confirm.
 */
static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	/* New mappings start masked and must be explicitly requested */
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret, irq, nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%pOFn: unable to add irq domain\n", np);
		return -ENOMEM;
	}

	/* One chip type; mask cache seeded from hardware state */
	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%pOFn: unable to alloc irq domain gc\n", np);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%pOFn: unable to get resource\n", np);
		return ret;
	}

	/* NOTE(review): uses %s/np->name unlike the %pOFn used elsewhere */
	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%s: unable to request mem region\n", np->name);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%pOFn: unable to parse irq\n", np);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%pOFn: unable to map resource\n", np);
		return -ENOMEM;
	}

	/* Edge irqs: ack clears the pending bit in CAUSE, MASK gates delivery */
	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	/* startup acks before unmasking to discard stale cause bits */
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	/* Install handler and its domain data atomically under desc->lock */
	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
					 domain);

	return 0;
}
IRQCHIP_DECLARE(orion_bridge_intc,
		"marvell,orion-bridge-intc", orion_bridge_irq_init);
|