2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Derived from arch/i386/kernel/irq.c
|
|
|
|
* Copyright (C) 1992 Linus Torvalds
|
|
|
|
* Adapted from arch/i386 by Gary Thomas
|
|
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
2005-11-09 14:07:45 +07:00
|
|
|
* Updated and modified by Cort Dougan <cort@fsmlabs.com>
|
|
|
|
* Copyright (C) 1996-2001 Cort Dougan
|
2005-04-17 05:20:36 +07:00
|
|
|
* Adapted for Power Macintosh by Paul Mackerras
|
|
|
|
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
|
|
|
|
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
|
2005-11-09 14:07:45 +07:00
|
|
|
*
|
2005-04-17 05:20:36 +07:00
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This file contains the code used by various IRQ handling routines:
|
|
|
|
* asking for different IRQ's should be done through these routines
|
|
|
|
* instead of just grabbing them. Thus setups with different IRQ numbers
|
|
|
|
* shouldn't result in any weird surprises, and installing new handlers
|
|
|
|
* should be easier.
|
2005-11-09 14:07:45 +07:00
|
|
|
*
|
|
|
|
* The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
|
|
|
|
* interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
|
|
|
|
* mask register (of which only 16 are defined), hence the weird shifting
|
|
|
|
* and complement of the cached_irq_mask. I want to be able to stuff
|
|
|
|
* this right into the SIU SMASK register.
|
|
|
|
* Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
|
|
|
|
* to reduce code space and undefined function references.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
#undef DEBUG
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/threads.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/signal.h>
|
|
|
|
#include <linux/sched.h>
|
2005-11-09 14:07:45 +07:00
|
|
|
#include <linux/ptrace.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/timex.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/irq.h>
|
2005-11-09 14:07:45 +07:00
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/cpumask.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/profile.h>
|
|
|
|
#include <linux/bitops.h>
|
2006-07-03 18:36:01 +07:00
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/radix-tree.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/bootmem.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include <asm/uaccess.h>
|
|
|
|
#include <asm/system.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/prom.h>
|
|
|
|
#include <asm/ptrace.h>
|
|
|
|
#include <asm/machdep.h>
|
2006-07-03 18:36:01 +07:00
|
|
|
#include <asm/udbg.h>
|
2005-11-17 12:14:17 +07:00
|
|
|
#ifdef CONFIG_PPC_ISERIES
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/paca.h>
|
2005-11-09 14:07:45 +07:00
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-11-10 14:38:46 +07:00
|
|
|
/* Offset between logical irq numbers and platform hardware numbers. */
int __irq_offset_value;
/* Interrupts that arrived with no pending source; shown as "BAD:" in /proc. */
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
/* Interrupts "lost" while soft-disabled, replayed by 32-bit entry code. */
atomic_t ppc_n_lost_interrupts;

#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
/* Cached image of the PIC mask registers for legacy 32-bit controllers. */
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif

#ifdef CONFIG_TAU_INT
/* Thermal Assist Unit state/counters, defined by the TAU driver. */
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
/* IPI statistics maintained by the 32-bit SMP code. */
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

/* When non-zero, interrupt affinity is spread across all online cpus. */
int distribute_irqs = 1;
#endif /* CONFIG_PPC64 */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
 * seq_file show callback for /proc/interrupts: prints one row per irq
 * with an installed action, then summary rows (TAU temps, IPI counts,
 * spurious count) when the iterator reaches NR_IRQS.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		/* Header row: one column per online cpu. */
		seq_puts(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		/* Hold the descriptor lock so the action list is stable. */
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		/* Skip irqs with nothing attached. */
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->chip)
			seq_printf(p, " %s ", desc->chip->typename);
		else
			seq_puts(p, " None ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
		seq_printf(p, " %s", action->name);
		/* Shared irqs: list every additional handler name. */
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
		if (tau_initialized){
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
		}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
void fixup_irqs(cpumask_t map)
|
|
|
|
{
|
|
|
|
unsigned int irq;
|
|
|
|
static int warned;
|
|
|
|
|
|
|
|
for_each_irq(irq) {
|
|
|
|
cpumask_t mask;
|
|
|
|
|
|
|
|
if (irq_desc[irq].status & IRQ_PER_CPU)
|
|
|
|
continue;
|
|
|
|
|
2006-06-29 16:24:38 +07:00
|
|
|
cpus_and(mask, irq_desc[irq].affinity, map);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (any_online_cpu(mask) == NR_CPUS) {
|
|
|
|
printk("Breaking affinity for irq %i\n", irq);
|
|
|
|
mask = map;
|
|
|
|
}
|
[PATCH] genirq: rename desc->handler to desc->chip
This patch-queue improves the generic IRQ layer to be truly generic, by adding
various abstractions and features to it, without impacting existing
functionality.
While the queue can be best described as "fix and improve everything in the
generic IRQ layer that we could think of", and thus it consists of many
smaller features and lots of cleanups, the one feature that stands out most is
the new 'irq chip' abstraction.
The irq-chip abstraction is about describing and coding and IRQ controller
driver by mapping its raw hardware capabilities [and quirks, if needed] in a
straightforward way, without having to think about "IRQ flow"
(level/edge/etc.) type of details.
This stands in contrast with the current 'irq-type' model of genirq
architectures, which 'mixes' raw hardware capabilities with 'flow' details.
The patchset supports both types of irq controller designs at once, and
converts i386 and x86_64 to the new irq-chip design.
As a bonus side-effect of the irq-chip approach, chained interrupt controllers
(master/slave PIC constructs, etc.) are now supported by design as well.
The end result of this patchset intends to be simpler architecture-level code
and more consolidation between architectures.
We reused many bits of code and many concepts from Russell King's ARM IRQ
layer, the merging of which was one of the motivations for this patchset.
This patch:
rename desc->handler to desc->chip.
Originally i did not want to do this, because it's a big patch. But having
both "desc->handler", "desc->handle_irq" and "action->handler" caused a
large degree of confusion and made the code appear alot less clean than it
truly is.
I have also attempted a dual approach as well by introducing a
desc->chip alias - but that just wasnt robust enough and broke
frequently.
So lets get over with this quickly. The conversion was done automatically
via scripts and converts all the code in the kernel.
This renaming patch is the first one amongst the patches, so that the
remaining patches can stay flexible and can be merged and split up
without having some big monolithic patch act as a merge barrier.
[akpm@osdl.org: build fix]
[akpm@osdl.org: another build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-29 16:24:36 +07:00
|
|
|
if (irq_desc[irq].chip->set_affinity)
|
|
|
|
irq_desc[irq].chip->set_affinity(irq, mask);
|
2005-04-17 05:20:36 +07:00
|
|
|
else if (irq_desc[irq].action && !(warned++))
|
|
|
|
printk("Cannot set affinity for irq %i\n", irq);
|
|
|
|
}
|
|
|
|
|
|
|
|
local_irq_enable();
|
|
|
|
mdelay(1);
|
|
|
|
local_irq_disable();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Common powerpc external-interrupt entry point.  Asks the platform
 * (ppc_md.get_irq) which irq is pending and dispatches it, switching
 * to the per-cpu hard-irq stack when CONFIG_IRQSTACKS is enabled.
 */
void do_IRQ(struct pt_regs *regs)
{
	unsigned int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq(regs);

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		/* Already on the irq stack (nested)?  Then fall through to
		 * the plain generic_handle_irq() path below. */
		if (curtp != irqtp) {
			struct irq_desc *desc = irq_desc + irq;
			void *handler = desc->handle_irq;
			if (handler == NULL)
				handler = &__do_IRQ;
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			call_handle_irq(irq, desc, regs, irqtp, handler);
			irqtp->task = NULL;
			/* Copy back flags (e.g. need-resched) raised while
			 * running on the irq stack. */
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			generic_handle_irq(irq, regs);
	} else if (irq != NO_IRQ_IGNORE)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;

	irq_exit();

#ifdef CONFIG_PPC_ISERIES
	/* iSeries: the hypervisor may have noted a decrementer event
	 * while we were in the hypervisor; replay it here. */
	if (get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
 * Boot-time interrupt initialisation: let the platform set up its
 * interrupt controller(s), then (64-bit only) allocate the per-cpu
 * irq stacks.
 */
void __init init_IRQ(void)
{
	ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
	irq_ctx_init();
#endif
}
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_IRQSTACKS
/* Per-cpu dedicated softirq/hardirq stacks; each points at the
 * thread_info sitting at the base of its stack area. */
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
void irq_ctx_init(void)
|
|
|
|
{
|
|
|
|
struct thread_info *tp;
|
|
|
|
int i;
|
|
|
|
|
2006-03-29 05:50:51 +07:00
|
|
|
for_each_possible_cpu(i) {
|
2005-04-17 05:20:36 +07:00
|
|
|
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
|
|
|
|
tp = softirq_ctx[i];
|
|
|
|
tp->cpu = i;
|
|
|
|
tp->preempt_count = SOFTIRQ_OFFSET;
|
|
|
|
|
|
|
|
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
|
|
|
|
tp = hardirq_ctx[i];
|
|
|
|
tp->cpu = i;
|
|
|
|
tp->preempt_count = HARDIRQ_OFFSET;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
static inline void do_softirq_onstack(void)
|
|
|
|
{
|
|
|
|
struct thread_info *curtp, *irqtp;
|
|
|
|
|
|
|
|
curtp = current_thread_info();
|
|
|
|
irqtp = softirq_ctx[smp_processor_id()];
|
|
|
|
irqtp->task = curtp->task;
|
|
|
|
call_do_softirq(irqtp);
|
|
|
|
irqtp->task = NULL;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
#else
|
|
|
|
#define do_softirq_onstack() __do_softirq()
|
|
|
|
#endif /* CONFIG_IRQSTACKS */
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * Architecture entry point for softirq processing.  A nested call
 * (from interrupt context) returns immediately -- the outermost
 * invocation will pick the work up.  Otherwise any pending softirqs
 * are run on the softirq stack with hard interrupts disabled around
 * the dispatch.
 */
void do_softirq(void)
{
	unsigned long irq_state;

	if (in_interrupt())
		return;

	local_irq_save(irq_state);
	if (local_softirq_pending())
		do_softirq_onstack();
	local_irq_restore(irq_state);
}
EXPORT_SYMBOL(do_softirq);
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2006-07-03 18:36:01 +07:00
|
|
|
* IRQ controller and virtual interrupts
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
#ifdef CONFIG_PPC_MERGE

/* Registered interrupt controllers ("hosts"). */
static LIST_HEAD(irq_hosts);
/*
 * Protects irq_hosts and the legacy-host reservation in irq_map[0].
 * Use DEFINE_SPINLOCK() rather than the deprecated SPIN_LOCK_UNLOCKED
 * initializer: the macro gives the lock its own lockdep class instead
 * of sharing one class among every lock initialized that way.
 */
static DEFINE_SPINLOCK(irq_big_lock);

/* Reverse map: virq -> (host, hwirq).  Slot 0 doubles as the
 * "legacy host allocated" marker. */
struct irq_map_entry irq_map[NR_IRQS];
/* Upper bound on virq numbers the allocator may hand out. */
static unsigned int irq_virq_count = NR_IRQS;
/* Fallback host used when no controller node is specified. */
static struct irq_host *irq_default_host;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
/*
 * Allocate and register an interrupt controller "host".
 *
 * @revmap_type: IRQ_HOST_MAP_* reverse-map strategy
 * @revmap_arg:  for IRQ_HOST_MAP_LINEAR, the size of the linear revmap
 * @ops:         controller callbacks (map/unmap/match/xlate)
 * @inval_irq:   hwirq value the controller uses to mean "no interrupt"
 *
 * Returns the new host, or NULL on failure (out of memory, or a second
 * legacy host).  May be called before the slab allocator is up, in
 * which case the structure comes from bootmem and cannot be freed.
 */
struct irq_host *irq_alloc_host(unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	/* Early boot: fall back to bootmem and zero by hand. */
	if (mem_init_done)
		host = kzalloc(size, GFP_KERNEL);
	else {
		host = alloc_bootmem(size);
		if (host)
			memset(host, 0, size);
	}
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use irq 0 host pointer for that
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = 0;
			/* Barriers order hwirq/host publication for lockless
			 * readers of irq_map -- NOTE(review): assumed to pair
			 * with irq_find_mapping; confirm. */
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear some flags */
			get_irq_desc(i)->status
				&= ~(IRQ_NOREQUEST | IRQ_LEVEL);

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitely change them
			 */
			ops->map(host, i, i, 0);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		/* Revmap table lives immediately after the host struct. */
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = IRQ_NONE;
		host->revmap_data.linear.size = revmap_arg;
		/* Publish size before the revmap pointer becomes visible. */
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
|
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
struct irq_host *irq_find_host(struct device_node *node)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-07-03 18:36:01 +07:00
|
|
|
struct irq_host *h, *found = NULL;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/* We might want to match the legacy controller last since
|
|
|
|
* it might potentially be set to match all interrupts in
|
|
|
|
* the absence of a device node. This isn't a problem so far
|
|
|
|
* yet though...
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&irq_big_lock, flags);
|
|
|
|
list_for_each_entry(h, &irq_hosts, link)
|
|
|
|
if (h->ops->match == NULL || h->ops->match(h, node)) {
|
|
|
|
found = h;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&irq_big_lock, flags);
|
|
|
|
return found;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(irq_find_host);
|
|
|
|
|
|
|
|
void irq_set_default_host(struct irq_host *host)
|
|
|
|
{
|
|
|
|
pr_debug("irq: Default host set to @0x%p\n", host);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
irq_default_host = host;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
void irq_set_virq_count(unsigned int count)
|
|
|
|
{
|
|
|
|
pr_debug("irq: Trying to set virq count to %d\n", count);
|
2005-06-23 06:43:37 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
BUG_ON(count < NUM_ISA_INTERRUPTS);
|
|
|
|
if (count < NR_IRQS)
|
|
|
|
irq_virq_count = count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Map a hardware interrupt number on 'host' to a virtual irq number,
 * creating the mapping if it does not already exist.
 *
 * @host:  controller (NULL means the default host)
 * @hwirq: controller-local hardware interrupt number
 * @flags: IRQ_TYPE_* trigger flags, handed to host->ops->map()
 *
 * Returns the virq, or NO_IRQ on failure.
 */
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq,
				unsigned int flags)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n",
		host, hwirq, flags);

	/* Look for default host if nececssary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		" NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exist, if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != IRQ_NONE) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		host->ops->map(host, virq, hwirq, flags);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		/* Legacy hosts use the identity mapping virq == hwirq. */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		/* Hint keeps virq close to hwirq modulo the virq space. */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}
	pr_debug("irq: -> obtained virq %d\n", virq);

	/* Clear some flags */
	get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL);

	/* map it */
	if (host->ops->map(host, virq, hwirq, flags)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		irq_free_virt(virq, 1);
		return NO_IRQ;
	}
	/* Publish hwirq only after ->map() succeeded.  NOTE(review):
	 * barriers presumably pair with lockless irq_map readers such as
	 * irq_find_mapping -- confirm against those. */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
|
|
|
|
|
|
|
|
extern unsigned int irq_create_of_mapping(struct device_node *controller,
|
|
|
|
u32 *intspec, unsigned int intsize)
|
|
|
|
{
|
|
|
|
struct irq_host *host;
|
|
|
|
irq_hw_number_t hwirq;
|
|
|
|
unsigned int flags = IRQ_TYPE_NONE;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
if (controller == NULL)
|
|
|
|
host = irq_default_host;
|
|
|
|
else
|
|
|
|
host = irq_find_host(controller);
|
|
|
|
if (host == NULL)
|
|
|
|
return NO_IRQ;
|
|
|
|
|
|
|
|
/* If host has no translation, then we assume interrupt line */
|
|
|
|
if (host->ops->xlate == NULL)
|
|
|
|
hwirq = intspec[0];
|
|
|
|
else {
|
|
|
|
if (host->ops->xlate(host, controller, intspec, intsize,
|
|
|
|
&hwirq, &flags))
|
|
|
|
return NO_IRQ;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2006-07-03 18:36:01 +07:00
|
|
|
|
|
|
|
return irq_create_mapping(host, hwirq, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2006-07-03 18:36:01 +07:00
|
|
|
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-07-03 18:36:01 +07:00
|
|
|
struct of_irq oirq;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
if (of_irq_map_one(dev, index, &oirq))
|
|
|
|
return NO_IRQ;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
return irq_create_of_mapping(oirq.controller, oirq.specifier,
|
|
|
|
oirq.size);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
/*
 * irq_dispose_mapping - tear down the mapping of a virtual irq
 * @virq: virtual irq number previously returned by irq_create_mapping()
 *
 * Removes the chip/handler, waits for in-flight handlers, lets the PIC
 * unmap the line, clears the reverse map entry and releases the virq.
 * Legacy (ISA) interrupts are never unmapped.  The barriers below order
 * the unmap against lockless readers of irq_map[] — keep the sequence
 * as-is.
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host = irq_map[virq].host;
	irq_hw_number_t hwirq;
	unsigned long flags;

	WARN_ON (host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		/* out-of-range hwirqs were never cached, nothing to clear */
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
		break;
	case IRQ_HOST_MAP_TREE:
		/* Check if radix tree allocated yet */
		if (host->revmap_data.tree.gfp_mask == 0)
			break;
		/* XXX radix tree not safe ! remove lock whem it becomes safe
		 * and use some RCU sync to make sure everything is ok before we
		 * can re-use that map entry
		 */
		spin_lock_irqsave(&irq_big_lock, flags);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		spin_unlock_irqrestore(&irq_big_lock, flags);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	get_irq_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
|
2006-07-03 18:36:01 +07:00
|
|
|
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
unsigned int irq_find_mapping(struct irq_host *host,
|
|
|
|
irq_hw_number_t hwirq)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
unsigned int hint = hwirq % irq_virq_count;
|
|
|
|
|
|
|
|
/* Look for default host if nececssary */
|
|
|
|
if (host == NULL)
|
|
|
|
host = irq_default_host;
|
|
|
|
if (host == NULL)
|
|
|
|
return NO_IRQ;
|
|
|
|
|
|
|
|
/* legacy -> bail early */
|
|
|
|
if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
|
|
|
|
return hwirq;
|
|
|
|
|
|
|
|
/* Slow path does a linear search of the map */
|
|
|
|
if (hint < NUM_ISA_INTERRUPTS)
|
|
|
|
hint = NUM_ISA_INTERRUPTS;
|
|
|
|
i = hint;
|
|
|
|
do {
|
|
|
|
if (irq_map[i].host == host &&
|
|
|
|
irq_map[i].hwirq == hwirq)
|
|
|
|
return i;
|
|
|
|
i++;
|
|
|
|
if (i >= irq_virq_count)
|
|
|
|
i = NUM_ISA_INTERRUPTS;
|
|
|
|
} while(i != hint);
|
|
|
|
return NO_IRQ;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(irq_find_mapping);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
|
|
|
|
/*
 * irq_radix_revmap - reverse map a hw irq to a virq via the radix tree
 * @host: irq host, must be of type IRQ_HOST_MAP_TREE
 * @hwirq: hardware irq number to resolve
 *
 * Fast path for tree-mapped hosts.  Falls back to the linear search in
 * irq_find_mapping() when the tree has not been initialized yet (boot
 * time, before irq_late_init()), and lazily inserts entries found by
 * the slow path so subsequent lookups hit the tree.
 */
unsigned int irq_radix_revmap(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	struct radix_tree_root *tree;
	struct irq_map_entry *ptr;
	unsigned int virq;
	unsigned long flags;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/* Check if the radix tree exist yet. We test the value of
	 * the gfp_mask for that. Sneaky but saves another int in the
	 * structure. If not, we fallback to slow mode
	 */
	tree = &host->revmap_data.tree;
	if (tree->gfp_mask == 0)
		return irq_find_mapping(host, hwirq);

	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
	 * when that is fixed (when Nick's patch gets in
	 */
	spin_lock_irqsave(&irq_big_lock, flags);

	/* Now try to resolve */
	ptr = radix_tree_lookup(tree, hwirq);
	/* Found it, return */
	if (ptr) {
		/* entries point into irq_map[], so the index is the virq */
		virq = ptr - irq_map;
		goto bail;
	}

	/* If not there, try to insert it */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ)
		radix_tree_insert(tree, virq, &irq_map[virq]);
 bail:
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return virq;
}
|
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
unsigned int irq_linear_revmap(struct irq_host *host,
|
|
|
|
irq_hw_number_t hwirq)
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
{
|
2006-07-03 18:36:01 +07:00
|
|
|
unsigned int *revmap;
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
|
|
|
|
|
|
|
|
/* Check revmap bounds */
|
|
|
|
if (unlikely(hwirq >= host->revmap_data.linear.size))
|
|
|
|
return irq_find_mapping(host, hwirq);
|
|
|
|
|
|
|
|
/* Check if revmap was allocated */
|
|
|
|
revmap = host->revmap_data.linear.revmap;
|
|
|
|
if (unlikely(revmap == NULL))
|
|
|
|
return irq_find_mapping(host, hwirq);
|
|
|
|
|
|
|
|
/* Fill up revmap with slow path if no mapping found */
|
|
|
|
if (unlikely(revmap[hwirq] == NO_IRQ))
|
|
|
|
revmap[hwirq] = irq_find_mapping(host, hwirq);
|
|
|
|
|
|
|
|
return revmap[hwirq];
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
}
|
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
unsigned int irq_alloc_virt(struct irq_host *host,
|
|
|
|
unsigned int count,
|
|
|
|
unsigned int hint)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned int i, j, found = NO_IRQ;
|
|
|
|
unsigned int limit = irq_virq_count - count;
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
|
2006-07-03 18:36:01 +07:00
|
|
|
if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
|
|
|
|
return NO_IRQ;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&irq_big_lock, flags);
|
|
|
|
|
|
|
|
/* Use hint for 1 interrupt if any */
|
|
|
|
if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
|
|
|
|
hint < irq_virq_count && irq_map[hint].host == NULL) {
|
|
|
|
found = hint;
|
|
|
|
goto hint_found;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Look for count consecutive numbers in the allocatable
|
|
|
|
* (non-legacy) space
|
|
|
|
*/
|
|
|
|
for (i = NUM_ISA_INTERRUPTS; i <= limit; ) {
|
|
|
|
for (j = i; j < (i + count); j++)
|
|
|
|
if (irq_map[j].host != NULL) {
|
|
|
|
i = j + 1;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
found = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (found == NO_IRQ) {
|
|
|
|
spin_unlock_irqrestore(&irq_big_lock, flags);
|
|
|
|
return NO_IRQ;
|
|
|
|
}
|
|
|
|
hint_found:
|
|
|
|
for (i = found; i < (found + count); i++) {
|
|
|
|
irq_map[i].hwirq = host->inval_irq;
|
|
|
|
smp_wmb();
|
|
|
|
irq_map[i].host = host;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&irq_big_lock, flags);
|
|
|
|
return found;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * irq_free_virt - release a range of virtual irq numbers
 * @virq: first virq of the range
 * @count: number of consecutive virqs to free
 *
 * Clears the host pointer of each entry under irq_big_lock so the slots
 * become allocatable again.  Legacy (ISA) slots are never freed.
 */
void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON (virq < NUM_ISA_INTERRUPTS);
	WARN_ON (count == 0 || (virq + count) > irq_virq_count);

	spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		/* Skip legacy slots.
		 * NOTE(review): the second condition is loop-invariant —
		 * if the requested range runs past irq_virq_count, nothing
		 * at all is freed (matching the WARN_ON above).  A
		 * per-entry bound "i >= irq_virq_count" may have been
		 * intended; confirm before changing, as that would free
		 * the in-range portion instead.
		 */
		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		/* invalidate hwirq before publishing the slot as free */
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
}
|
2006-07-03 18:36:01 +07:00
|
|
|
|
|
|
|
void irq_early_init(void)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NR_IRQS; i++)
|
|
|
|
get_irq_desc(i)->status |= IRQ_NOREQUEST;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We need to create the radix trees late */
|
|
|
|
static int irq_late_init(void)
|
|
|
|
{
|
|
|
|
struct irq_host *h;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&irq_big_lock, flags);
|
|
|
|
list_for_each_entry(h, &irq_hosts, link) {
|
|
|
|
if (h->revmap_type == IRQ_HOST_MAP_TREE)
|
|
|
|
INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&irq_big_lock, flags);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
arch_initcall(irq_late_init);
|
|
|
|
|
|
|
|
#endif /* CONFIG_PPC_MERGE */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-08 04:15:10 +07:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
|
|
int pci_enable_msi(struct pci_dev * pdev)
|
|
|
|
{
|
|
|
|
if (ppc_md.enable_msi)
|
|
|
|
return ppc_md.enable_msi(pdev);
|
|
|
|
else
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void pci_disable_msi(struct pci_dev * pdev)
|
|
|
|
{
|
|
|
|
if (ppc_md.disable_msi)
|
|
|
|
ppc_md.disable_msi(pdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remaining generic MSI entry points are stubs: only single-message
 * enable/disable is routed to the platform above.  MSI-X reports
 * failure (-1); the rest are no-ops.
 */
void pci_scan_msi_device(struct pci_dev *dev) {}
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 06:06:59 +07:00
|
|
|
#ifdef CONFIG_PPC64
|
2005-04-17 05:20:36 +07:00
|
|
|
/* "noirqdistrib" boot parameter: disable spreading of irqs across cpus
 * (clears the file-level distribute_irqs flag).  Returns 1 so the
 * option is consumed by __setup().
 */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}
|
|
|
|
|
|
|
|
__setup("noirqdistrib", setup_noirqdistrib);
|
2005-11-09 14:07:45 +07:00
|
|
|
#endif /* CONFIG_PPC64 */
|