2019-05-27 13:55:01 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2009-02-12 20:54:53 +07:00
|
|
|
/*
|
|
|
|
* Author: Kumar Gala <galak@kernel.crashing.org>
|
|
|
|
*
|
|
|
|
* Copyright 2009 Freescale Semiconductor Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/stddef.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/threads.h>
|
powerpc: Consolidate ipi message mux and demux
Consolidate the mux and demux of ipi messages into smp.c and call
a new smp_ops callback to actually trigger the ipi.
The powerpc architecture code is optimised for having 4 distinct
ipi triggers, which are mapped to 4 distinct messages (ipi many, ipi
single, scheduler ipi, and enter debugger). However, several interrupt
controllers only provide a single software triggered interrupt that
can be delivered to each cpu. To resolve this limitation, each smp_ops
implementation created a per-cpu variable that is manipulated with atomic
bitops. Since these lines will be contended they are optimialy marked as
shared_aligned and take a full cache line for each cpu. Distro kernels
may have 2 or 3 of these in their config, each taking per-cpu space
even though at most one will be in use.
This consolidation removes smp_message_recv and replaces the single call
actions cases with direct calls from the common message recognition loop.
The complicated debugger ipi case with its muxed crash handling code is
moved to debug_ipi_action which is now called from the demux code (instead
of the multi-message action calling smp_message_recv).
I put a call to reschedule_action to increase the likelyhood of correctly
merging the anticipated scheduler_ipi() hook coming from the scheduler
tree; that single required call can be inlined later.
The actual message decode is a copy of the old pseries xics code with its
memory barriers and cache line spacing, augmented with a per-cpu unsigned
long based on the book-e doorbell code. The optional data is set via a
callback from the implementation and is passed to the new cause-ipi hook
along with the logical cpu number. While currently only the doorbell
implemntation uses this data it should be almost zero cost to retrieve and
pass it -- it adds a single register load for the argument from the same
cache line to which we just completed a store and the register is dead
on return from the call. I extended the data element from unsigned int
to unsigned long in case some other code wanted to associate a pointer.
The doorbell check_self is replaced by a call to smp_muxed_ipi_resend,
conditioned on the CPU_DBELL feature. The ifdef guard could be relaxed
to CONFIG_SMP but I left it with BOOKE for now.
Also, the doorbell interrupt vector for book-e was not calling irq_enter
and irq_exit, which throws off cpu accounting and causes code to not
realize it is running in interrupt context. Add the missing calls.
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2011-05-11 02:29:39 +07:00
|
|
|
#include <linux/hardirq.h>
|
2009-02-12 20:54:53 +07:00
|
|
|
|
|
|
|
#include <asm/dbell.h>
|
2010-07-09 12:32:30 +07:00
|
|
|
#include <asm/irq_regs.h>
|
2015-03-19 15:29:01 +07:00
|
|
|
#include <asm/kvm_ppc.h>
|
2018-10-04 13:23:37 +07:00
|
|
|
#include <asm/trace.h>
|
2009-02-12 20:54:53 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
2017-04-13 17:16:21 +07:00
|
|
|
|
|
|
|
/*
 * Doorbells must only be used if CPU_FTR_DBELL is available.
 * msgsnd is used in HV, and msgsndp is used in !HV.
 *
 * These should be used by platform code that is aware of restrictions.
 * Other arch code should use ->cause_ipi.
 *
 * doorbell_global_ipi() sends a dbell to any target CPU.
 * Must be used only by architectures that address msgsnd target
 * by PIR/get_hard_smp_processor_id.
 */
void doorbell_global_ipi(int cpu)
{
	/* msgsnd addresses the target by its hardware (PIR) id */
	u32 tag = get_hard_smp_processor_id(cpu);

	/*
	 * Mark a host IPI pending for KVM's benefit; cleared again by
	 * doorbell_exception() on the receiving CPU.
	 */
	kvmppc_set_host_ipi(cpu, 1);
	/* Order previous accesses vs. msgsnd, which is treated as a store */
	ppc_msgsnd_sync();
	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
}
|
|
|
|
|
2017-04-13 17:16:21 +07:00
|
|
|
/*
 * doorbell_core_ipi() sends a dbell to a target CPU in the same core.
 * Must be used only by architectures that address msgsnd target
 * by TIR/cpu_thread_in_core.
 */
void doorbell_core_ipi(int cpu)
{
	/* msgsndp addresses the target by its thread number within the core */
	u32 tag = cpu_thread_in_core(cpu);

	/*
	 * Mark a host IPI pending for KVM's benefit; cleared again by
	 * doorbell_exception() on the receiving CPU.
	 */
	kvmppc_set_host_ipi(cpu, 1);
	/* Order previous accesses vs. msgsnd, which is treated as a store */
	ppc_msgsnd_sync();
	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
}
|
|
|
|
|
|
|
|
/*
 * Attempt to cause a core doorbell if destination is on the same core.
 * Returns 1 on success, 0 on failure.
 */
int doorbell_try_core_ipi(int cpu)
{
	/* get_cpu() pins us so the sibling test stays valid until put_cpu() */
	int me = get_cpu();
	int sent = 0;

	/* Core doorbells can only reach hardware threads of our own core */
	if (cpumask_test_cpu(cpu, cpu_sibling_mask(me))) {
		doorbell_core_ipi(cpu);
		sent = 1;
	}

	put_cpu();
	return sent;
}
|
2010-07-09 12:25:18 +07:00
|
|
|
|
|
|
|
/*
 * Interrupt handler for a received doorbell on this CPU. Demuxes the
 * muxed IPI messages and accounts the interrupt; runs between
 * irq_enter()/irq_exit() so the kernel knows it is in interrupt context.
 */
void doorbell_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	trace_doorbell_entry(regs);

	/*
	 * Receive-side barrier pairing with ppc_msgsnd_sync() in the
	 * senders; smp_ipi_demux_relaxed() below relies on it.
	 */
	ppc_msgsync();

	/* Doorbell source is consumed, so hard interrupts may be re-enabled */
	may_hard_irq_enable();

	/* Clear the host-IPI flag the sending side set for KVM */
	kvmppc_set_host_ipi(smp_processor_id(), 0);
	__this_cpu_inc(irq_stat.doorbell_irqs);

	smp_ipi_demux_relaxed(); /* already performed the barrier */

	trace_doorbell_exit(regs);
	irq_exit();
	set_irq_regs(old_regs);
}
|
|
|
|
#else /* CONFIG_SMP */
|
|
|
|
/*
 * !CONFIG_SMP stub: a doorbell should never be delivered on a
 * non-SMP kernel, so just warn if one arrives.
 */
void doorbell_exception(struct pt_regs *regs)
{
	/* pr_warn() is the modern spelling of printk(KERN_WARNING ...) */
	pr_warn("Received doorbell on non-smp system\n");
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|