mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 03:05:21 +07:00
0429fbc0bd
Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static
  and dynamic percpu memory areas were allocated and handled separately
  and had their own accessors.  The distinction has been gone for many
  years now; however, the now duplicate two sets of accessors remained
  with the pointer based ones - this_cpu_*() - evolving various other
  operations over time.  During the process, we also accumulated other
  inconsistent operations.

  This pull request contains Christoph's patches to clean up the
  duplicate accessor situation.  __get_cpu_var() uses are replaced with
  this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr().

  Unfortunately, the former sometimes is tricky thanks to C being a bit
  messy with the distinction between lvalues and pointers, which led to
  a rather ugly solution for cpumask_var_t involving the introduction of
  this_cpu_cpumask_var_ptr().

  This converts most of the uses but not all.  Christoph will follow up
  with the remaining conversions in this merge window and hopefully
  remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
197 lines
4.5 KiB
C

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
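
/*
 * Work is kept on two per-cpu lock-less lists: "raised" entries are run from
 * the arch irq_work/IPI path via irq_work_run(), while "lazy" entries
 * (IRQ_WORK_LAZY) are normally left for the next timer tick and are only
 * raised eagerly when the tick is stopped (see irq_work_queue() and
 * irq_work_tick() below).
 */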

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
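
/*
 * Resulting flag lifecycle (IRQ_WORK_FLAGS is IRQ_WORK_PENDING | IRQ_WORK_BUSY
 * per <linux/irq_work.h>):
 *
 *   0               free; irq_work_claim() succeeds and the work gets queued
 *   PENDING | BUSY  claimed and queued; further claims fail
 *   BUSY            callback running (PENDING cleared by irq_work_run_list());
 *                   the work may be claimed and re-queued again
 *   0               BUSY cleared after the callback, unless re-claimed meanwhile
 */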

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
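
/*
 * A hypothetical architecture override is sketched below (kept under "#if 0");
 * the self-IPI call and vector name are illustrative placeholders, not any
 * real architecture's API.  Architectures without a suitable interrupt keep
 * the weak stub above and have their raised work run from irq_work_tick().
 */
#if 0
void arch_irq_work_raise(void)
{
	/* Trigger a self-interrupt whose handler ends up in irq_work_run() */
	hypothetical_send_self_ipi(HYPOTHETICAL_IRQ_WORK_VECTOR);
}
#endif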

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI-safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif
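
/*
 * Illustrative remote-queue sketch (under "#if 0"; example_work and target_cpu
 * are hypothetical, not part of this file).  The callback then runs from the
 * target CPU's single-function-call IPI handler.
 */
#if 0
static void example_queue_remote(struct irq_work *example_work, int target_cpu)
{
	if (!irq_work_queue_on(example_work, target_cpu))
		pr_debug("irq_work: already pending, not queued again\n");
}
#endif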

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
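
/*
 * Illustrative usage sketch (under "#if 0"; all example_* names are
 * hypothetical, not part of this file).  A caller embeds a struct irq_work,
 * sets its callback once, and can then queue it even from NMI context; the
 * callback later runs in hardirq context on the queueing CPU.
 */
#if 0
static void example_callback(struct irq_work *work)
{
	pr_info("irq_work callback running in hardirq context\n");
}

static struct irq_work example_work = {
	.func	= example_callback,
	/* or .flags = IRQ_WORK_LAZY to defer execution to the next tick */
};

static void example_raise(void)
{
	/* Returns false and does nothing if example_work is already pending */
	irq_work_queue(&example_work);
}
#endif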

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = &__get_cpu_var(raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(&__get_cpu_var(lazy_list));
}
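
/*
 * irq_work_tick() is the fallback execution path: it is called from the timer
 * tick, so architectures whose arch_irq_work_raise() is the weak no-op above
 * still get their raised work run, at tick latency.  The lazy list is always
 * drained from here.
 */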

/*
 * Synchronize against the irq_work @work, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
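
/*
 * Illustrative teardown sketch (under "#if 0"; example_work is the
 * hypothetical object from the earlier sketch).  Before freeing or unloading
 * the code owning an irq_work, wait for any in-flight callback to finish.
 */
#if 0
static void example_teardown(void)
{
	/* Busy-waits until a running callback completes; needs irqs enabled */
	irq_work_sync(&example_work);
}
#endif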