mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-15 15:26:41 +07:00
bd024e82e4
Although mmiowb() is concerned only with serialising MMIO writes occurring
in contexts where a spinlock is held, the call to mmiowb_set_pending() from
the MMIO write accessors can occur in preemptible contexts, such as during
driver probe() functions where ordering between CPUs is not usually a
concern, assuming that the task migration path provides the necessary
ordering guarantees.

Unfortunately, the default implementation of mmiowb_set_pending() is not
preempt-safe, as it makes use of a per-cpu variable to track its internal
state. This has been reported to generate the following splat on riscv:

 | BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1
 | caller is regmap_mmio_write32le+0x1c/0x46
 | CPU: 3 PID: 1 Comm: swapper/0 Not tainted 5.8.0-rc3-hfu+ #1
 | Call Trace:
 |  walk_stackframe+0x0/0x7a
 |  dump_stack+0x6e/0x88
 |  regmap_mmio_write32le+0x18/0x46
 |  check_preemption_disabled+0xa4/0xaa
 |  regmap_mmio_write32le+0x18/0x46
 |  regmap_mmio_write+0x26/0x44
 |  regmap_write+0x28/0x48
 |  sifive_gpio_probe+0xc0/0x1da

Although it's possible to fix the driver in this case, other splats have
been seen from other drivers, including the infamous 8250 UART, and so it's
better to address this problem in the mmiowb core itself.

Fix mmiowb_set_pending() by using raw_cpu_ptr() to get at the mmiowb state
and then only updating the 'mmiowb_pending' field if we are not preemptible
(i.e. we have a non-zero nesting count).

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Reported-by: Palmer Dabbelt <palmer@dabbelt.com>
Reported-by: Emil Renner Berthing <kernel@esmil.dk>
Tested-by: Emil Renner Berthing <kernel@esmil.dk>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
Link: https://lore.kernel.org/r/20200716112816.7356-1-will@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
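As a rough illustration of the failure mode (an editor's sketch, not part of
the commit; the _unsafe/_fixed names are hypothetical): with
CONFIG_DEBUG_PREEMPT enabled, this_cpu_ptr() ends up in
check_preemption_disabled(), which is exactly the check firing in the splat
above, while raw_cpu_ptr() performs the same per-cpu access without it:

        /* Sketch only; __mmiowb_state is the real per-cpu variable below. */
        static inline void mmiowb_set_pending_unsafe(void)
        {
                /* Splats under CONFIG_DEBUG_PREEMPT if the caller is preemptible. */
                struct mmiowb_state *ms = this_cpu_ptr(&__mmiowb_state);

                ms->mmiowb_pending = ms->nesting_count;
        }

        static inline void mmiowb_set_pending_fixed(void)
        {
                /*
                 * No preemption check. Safe because the update only happens
                 * while nesting_count is non-zero, i.e. while a spinlock is
                 * held and preemption is therefore already disabled.
                 */
                struct mmiowb_state *ms = raw_cpu_ptr(&__mmiowb_state);

                if (likely(ms->nesting_count))
                        ms->mmiowb_pending = ms->nesting_count;
        }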
66 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_MMIOWB_H
#define __ASM_GENERIC_MMIOWB_H

/*
 * Generic implementation of mmiowb() tracking for spinlocks.
 *
 * If your architecture doesn't ensure that writes to an I/O peripheral
 * within two spinlocked sections on two different CPUs are seen by the
 * peripheral in the order corresponding to the lock handover, then you
 * need to follow these FIVE easy steps:
 *
 *      1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
 *         in asm/mmiowb.h, then #include this file
 *      2. Ensure your I/O write accessors call mmiowb_set_pending()
 *      3. Select ARCH_HAS_MMIOWB
 *      4. Untangle the resulting mess of header files
 *      5. Complain to your architects
 */
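/*
 * Example of step 1 (editor's illustration, not part of this header):
 * riscv's arch/riscv/include/asm/mmiowb.h defines, roughly,
 *
 *      #define mmiowb()        __asm__ __volatile__ ("fence o,w" : : : "memory");
 */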
#ifdef CONFIG_MMIOWB

#include <linux/compiler.h>
#include <asm-generic/mmiowb_types.h>

#ifndef arch_mmiowb_state
#include <asm/percpu.h>
#include <asm/smp.h>

DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
#define __mmiowb_state()        raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state()        arch_mmiowb_state()
#endif  /* arch_mmiowb_state */
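
/*
 * Called by the MMIO write accessors. Note (per the commit message above)
 * that this can run in preemptible context, so __mmiowb_state() uses
 * raw_cpu_ptr() and the update is gated on a non-zero nesting_count,
 * i.e. a pending write is only recorded while a spinlock is actually held.
 */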
static inline void mmiowb_set_pending(void)
{
        struct mmiowb_state *ms = __mmiowb_state();

        if (likely(ms->nesting_count))
                ms->mmiowb_pending = ms->nesting_count;
}
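
/* Hooked into the spinlock acquire path; tracks lock nesting depth. */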
static inline void mmiowb_spin_lock(void)
{
        struct mmiowb_state *ms = __mmiowb_state();
        ms->nesting_count++;
}
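
/*
 * Hooked into the spinlock release path: if an MMIO write happened while
 * the lock was held, order it with mmiowb() before the lock is handed over.
 */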
static inline void mmiowb_spin_unlock(void)
{
        struct mmiowb_state *ms = __mmiowb_state();

        if (unlikely(ms->mmiowb_pending)) {
                ms->mmiowb_pending = 0;
                mmiowb();
        }

        ms->nesting_count--;
}
#else
#define mmiowb_set_pending()            do { } while (0)
#define mmiowb_spin_lock()              do { } while (0)
#define mmiowb_spin_unlock()            do { } while (0)
#endif  /* CONFIG_MMIOWB */
#endif  /* __ASM_GENERIC_MMIOWB_H */
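
For context, a sketch of how the spinlock core wires these hooks in,
paraphrased from mainline's include/linux/spinlock.h (not part of this
header):

        static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
        {
                __acquire(lock);
                arch_spin_lock(&lock->raw_lock);
                mmiowb_spin_lock();
        }

        static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
        {
                mmiowb_spin_unlock();
                arch_spin_unlock(&lock->raw_lock);
                __release(lock);
        }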