// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>
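
/*
 * UP counterpart of smp_call_function_single() in kernel/smp.c: CPU 0 is
 * the only CPU, so any other @cpu yields -ENXIO and @func runs immediately
 * on the local CPU with interrupts disabled; @wait is moot because the
 * call is always synchronous here.
 */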
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	if (cpu != 0)
		return -ENXIO;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
|
2013-09-12 04:23:24 +07:00
|
|
|
|
smp: Avoid using two cache lines for struct call_single_data
struct call_single_data is used in IPIs to transfer information between
CPUs. Its size is bigger than sizeof(unsigned long) and less than
cache line size. Currently it is not allocated with any explicit alignment
requirements. This makes it possible for allocated call_single_data to
cross two cache lines, which results in double the number of the cache lines
that need to be transferred among CPUs.
This can be fixed by requiring call_single_data to be aligned with the
size of call_single_data. Currently the size of call_single_data is the
power of 2. If we add new fields to call_single_data, we may need to
add padding to make sure the size of new definition is the power of 2
as well.
Fortunately, this is enforced by GCC, which will report bad sizes.
To set alignment requirements of call_single_data to the size of
call_single_data, a struct definition and a typedef is used.
To test the effect of the patch, I used the vm-scalability multiple
thread swap test case (swap-w-seq-mt). The test will create multiple
threads and each thread will eat memory until all RAM and part of swap
is used, so that huge number of IPIs are triggered when unmapping
memory. In the test, the throughput of memory writing improves ~5%
compared with misaligned call_single_data, because of faster IPIs.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Huang, Ying <ying.huang@intel.com>
[ Add call_single_data_t and align with size of call_single_data. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/87bmnqd6lz.fsf@yhuang-mobile.sh.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-08-08 11:30:00 +07:00
|
|
|
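
/*
 * UP version of the "asynchronous" single-CPU call: the csd is never
 * queued, csd->func(csd->info) simply runs right here with interrupts
 * disabled.
 */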
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
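
/*
 * Run @func on every online CPU, which on UP means just the local CPU 0;
 * @wait makes no difference because the call is always synchronous here.
 *
 * Hypothetical caller, for illustration only (names not from this file):
 *
 *	static void count_call(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	on_each_cpu(count_call, &my_counter, 1);
 */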
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(on_each_cpu);

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
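
/*
 * Shorthand for on_each_cpu_cond_mask() with no CPU mask: the predicate
 * is evaluated once for CPU 0 and @func is called only if it returns true.
 */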
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);
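
/*
 * Run @func(@par) on @cpu, which on UP must be CPU 0 (anything else is
 * -ENXIO).  With @phys set, the vCPU is pinned to physical CPU 0 via
 * hypervisor_pin_vcpu(0) for the duration of the call and released again
 * with hypervisor_pin_vcpu(-1).
 */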
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);