#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *      Generic SMP support
 *              Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);

struct call_single_data {
        struct list_head list;
        smp_call_func_t func;
        void *info;
        u16 flags;
        u16 priv;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
                             int wait);
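
/*
 * Illustrative sketch (not part of this header; the callback and data names
 * are hypothetical): run a function on one specific CPU and wait for it to
 * finish there.
 *
 *      static void read_remote_state(void *info)
 *      {
 *              ...
 *      }
 *
 *      int err = smp_call_function_single(2, read_remote_state, &data, 1);
 *
 * With wait != 0 the call returns only after the function has completed on
 * CPU 2; the return value is 0 on success or a negative errno otherwise.
 */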

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait);
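
/*
 * Illustrative sketch (hypothetical callback name): invoke a function on the
 * CPUs in a mask. smp_call_function_many() skips the calling CPU, so the
 * caller runs the function locally if it needs to be covered as well:
 *
 *      static void invalidate_local_cache(void *unused)
 *      {
 *              ...
 *      }
 *
 *      preempt_disable();
 *      smp_call_function_many(cpu_online_mask, invalidate_local_cache,
 *                             NULL, true);
 *      invalidate_local_cache(NULL);
 *      preempt_enable();
 */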

void __smp_call_function_single(int cpuid, struct call_single_data *data,
                                int wait);
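
/*
 * Sketch of one possible use (names are hypothetical): a caller that already
 * owns a call_single_data, e.g. one embedded in per-cpu state, fills it in
 * and passes it here so no further allocation or lookup is needed:
 *
 *      static DEFINE_PER_CPU(struct call_single_data, my_csd);
 *
 *      struct call_single_data *csd = &per_cpu(my_csd, cpu);
 *      csd->func = my_func;
 *      csd->info = my_info;
 *      __smp_call_function_single(cpu, csd, 0);
 */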

int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait);

/*
 * Generic and arch helpers
 */
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
#else
static inline void call_function_init(void) { }
#endif

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);
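
/*
 * Illustrative sketch (hypothetical callback name): run a function on every
 * online CPU, including the current one, and wait until all have finished:
 *
 *      static void reset_perf_counters(void *unused)
 *      {
 *              ...
 *      }
 *
 *      on_each_cpu(reset_perf_counters, NULL, 1);
 *
 * As with the other cross-CPU calls here, it must not be called with
 * interrupts disabled or from a hardware interrupt or bottom half handler.
 */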

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                void *info, bool wait);
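
/*
 * Illustrative sketch (hypothetical names): interrupt only the CPUs that
 * share a particular piece of state rather than every CPU in the system:
 *
 *      static void drain_local_queue(void *queue)
 *      {
 *              ...
 *      }
 *
 *      on_each_cpu_mask(queue->cpus, drain_local_queue, queue, true);
 *
 * Must not be called with interrupts disabled or from a hardware interrupt
 * handler or from a bottom half handler.
 */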

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                smp_call_func_t func, void *info, bool wait,
                gfp_t gfp_flags);
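
/*
 * Illustrative sketch (hypothetical names): build the target set on the fly
 * by asking cond_func about each CPU, so only CPUs with pending work get an
 * IPI:
 *
 *      static bool cpu_has_pending_work(int cpu, void *info)
 *      {
 *              return per_cpu(pending_work, cpu) != 0;
 *      }
 *
 *      on_each_cpu_cond(cpu_has_pending_work, drain_pending_work, NULL,
 *                       true, GFP_KERNEL);
 *
 * gfp_flags is used by the SMP implementation to allocate the temporary
 * cpumask it builds from cond_func's answers.
 */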

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *      These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()                  0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
        return 0;
}
#define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))
#define on_each_cpu(func,info,wait)             \
        ({                                      \
                local_irq_disable();            \
                func(info);                     \
                local_irq_enable();             \
                0;                              \
        })
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
#define on_each_cpu_mask(mask, func, info, wait) \
        do {                                            \
                if (cpumask_test_cpu(0, (mask))) {      \
                        local_irq_disable();            \
                        (func)(info);                   \
                        local_irq_enable();             \
                }                                       \
        } while (0)
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
        do {                                            \
                void *__info = (info);                  \
                preempt_disable();                      \
                if ((cond_func)(0, __info)) {           \
                        local_irq_disable();            \
                        (func)(__info);                 \
                        local_irq_enable();             \
                }                                       \
                preempt_enable();                       \
        } while (0)

static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()                      1
#define smp_prepare_boot_cpu()                  do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
                      void *info, int wait)
{
        return smp_call_function_single(0, func, info, wait);
}

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()               ({ preempt_disable(); smp_processor_id(); })
#define put_cpu()               preempt_enable()
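
/*
 * Illustrative sketch: get_cpu()/put_cpu() bracket a section that must stay
 * on one CPU, e.g. while touching per-cpu data:
 *
 *      int cpu = get_cpu();
 *      ... use per-cpu data belonging to 'cpu' ...
 *      put_cpu();
 *
 * Preemption is disabled between the two calls, so the task cannot migrate
 * and 'cpu' remains the ID of the CPU it is running on.
 */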

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */