x86: change x86_64 smp_call_function_mask to look alike i386
The two versions of smp_call_function_mask (the inner version and the outer version that takes the locks) are merged into one. With this change, the i386 and x86_64 versions look exactly the same.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 2513926c28
parent 3a36d1e435
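The refactoring pattern is easy to see outside the kernel. Below is a minimal user-space sketch (hypothetical names; a pthread mutex stands in for call_lock, and a plain function call stands in for sending the IPI and waiting): the old outer wrapper that took the lock and the old inner worker are folded into a single function that takes the lock itself and releases it on every exit path, including the early return when no other CPUs are targeted.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

/* One function instead of inner worker + locking wrapper: take the
 * lock here and drop it on every return path, as the patch does for
 * native_smp_call_function_mask. */
static int call_function_on_others(int cpus, void (*func)(void *), void *info)
{
	pthread_mutex_lock(&call_lock);

	if (cpus == 0) {
		/* The early exit must drop the lock, mirroring the new
		 * "if (!cpus) { spin_unlock(&call_lock); return 0; }" path. */
		pthread_mutex_unlock(&call_lock);
		return 0;
	}

	func(info);	/* stand-in for sending the IPI and the wait loops */

	pthread_mutex_unlock(&call_lock);
	return 0;
}

static void say_hello(void *info)
{
	printf("hello from %s\n", (const char *)info);
}

int main(void)
{
	call_function_on_others(1, say_hello, "one remote cpu");
	call_function_on_others(0, say_hello, "nobody");	/* early-return path */
	return 0;
}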
@@ -583,7 +583,7 @@ native_smp_call_function_mask(cpumask_t mask,
 	atomic_set(&data.finished, 0);
 
 	call_data = &data;
-	mb();
+	wmb();
 
 	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself))
@@ -354,26 +354,30 @@ static void __smp_call_function(void (*func) (void *info), void *info,
 }
 
 
 /*
  * this function sends a 'generic call function' IPI to all other CPU
  * of the system defined in the mask.
  */
-static int __smp_call_function_mask(cpumask_t mask,
-				    void (*func)(void *), void *info,
-				    int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+				  void (*func)(void *), void *info,
+				  int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
 	int cpus;
 
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
 	cpus_and(mask, mask, allbutself);
 	cpus = cpus_weight(mask);
 
-	if (!cpus)
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	data.func = func;
 	data.info = info;
@@ -395,43 +399,14 @@ static int __smp_call_function_mask(cpumask_t mask,
 	while (atomic_read(&data.started) != cpus)
 		cpu_relax();
 
-	if (!wait)
-		return 0;
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
 
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+	spin_unlock(&call_lock);
 
 	return 0;
 }
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
-{
-	int ret;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&call_lock);
-	ret = __smp_call_function_mask(mask, func, info, wait);
-	spin_unlock(&call_lock);
-	return ret;
-}
 
 static void stop_this_cpu(void *dummy)
 {
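For context, here is a hedged usage sketch derived from the kernel-doc block removed above (the TLB-flush helper names are invented for illustration): a caller builds a mask of every online CPU except its own, asks for a fast, non-blocking function to run there, and waits for completion, from process context with interrupts enabled.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <asm/tlbflush.h>

/* Runs on each targeted cpu; must be fast and non-blocking. */
static void remote_tlb_flush(void *info)
{
	__flush_tlb();
}

/* Hypothetical caller of the unified API: flush the TLB on all other
 * online cpus and wait until they are done. */
static void flush_tlb_on_others(void)
{
	cpumask_t mask;

	preempt_disable();			/* keep smp_processor_id() stable */
	mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);	/* must not include this cpu */
	smp_call_function_mask(mask, remote_tlb_flush, NULL, 1 /* wait */);
	preempt_enable();
}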