x86, mtrr: use stop_machine APIs for doing MTRR rendezvous
The MTRR rendezvous sequence was not implemented with stop_machine() before, because it gets called both from process context and from the cpu online path (where the cpu has not yet come online and interrupts are disabled, etc.). Now that we have the new stop_machine_from_inactive_cpu() API, use it for the rendezvous during mtrr init of a logical processor that is coming online. For the rest (runtime MTRR modification, system boot, resume paths), use stop_machine() to implement the rendezvous sequence. This consolidates and cleans up the code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/20110623182057.076997177@sbsiddha-MOBL3.sc.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent f740e6cd0c
commit 192d885742
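Before the diff, a minimal sketch of the rendezvous pattern this commit moves to: stop_machine() runs a handler on every online CPU in lockstep with interrupts disabled, so the hand-rolled count/gate choreography removed below can go away. The demo_* names are illustrative stand-ins, not part of the commit; the real handler and data structure appear in the diff that follows.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/stop_machine.h>

struct demo_rendezvous_data {
	unsigned int reg;	/* ~0U means "re-sync the saved MTRR state" */
};

/* Runs on every CPU in the mask, in lockstep, with interrupts disabled. */
static int demo_rendezvous_handler(void *info)
{
	struct demo_rendezvous_data *data = info;

	pr_info("cpu %d: rendezvous for reg %u\n", smp_processor_id(), data->reg);
	return 0;
}

/* Process-context path: stop_machine() does the cross-CPU choreography. */
static void demo_rendezvous(unsigned int reg)
{
	struct demo_rendezvous_data data = { .reg = reg };

	stop_machine(demo_rendezvous_handler, &data, cpu_online_mask);
}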
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -137,55 +137,43 @@ static void __init init_table(void)
 }
 
 struct set_mtrr_data {
-	atomic_t	count;
-	atomic_t	gate;
 	unsigned long	smp_base;
 	unsigned long	smp_size;
 	unsigned int	smp_reg;
 	mtrr_type	smp_type;
 };
 
-static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
-
 /**
- * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
+ * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
+ * by all the CPUs.
  * @info: pointer to mtrr configuration data
  *
  * Returns nothing.
  */
-static int mtrr_work_handler(void *info)
+static int mtrr_rendezvous_handler(void *info)
 {
 #ifdef CONFIG_SMP
 	struct set_mtrr_data *data = info;
-	unsigned long flags;
 
-	atomic_dec(&data->count);
-	while (!atomic_read(&data->gate))
-		cpu_relax();
-
-	local_irq_save(flags);
-
-	atomic_dec(&data->count);
-	while (atomic_read(&data->gate))
-		cpu_relax();
-
-	/* The master has cleared me to execute */
+	/*
+	 * We use this same function to initialize the mtrrs during boot,
+	 * resume, runtime cpu online and on an explicit request to set a
+	 * specific MTRR.
+	 *
+	 * During boot or suspend, the state of the boot cpu's mtrrs has been
+	 * saved, and we want to replicate that across all the cpus that come
+	 * online (either at the end of boot or resume or during a runtime cpu
+	 * online). If we're doing that, @reg is set to something special and on
+	 * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
+	 * started the boot/resume sequence, this might be a duplicate
+	 * set_all()).
+	 */
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else if (mtrr_aps_delayed_init) {
-		/*
-		 * Initialize the MTRRs inaddition to the synchronisation.
-		 */
+	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
 		mtrr_if->set_all();
 	}
-
-	atomic_dec(&data->count);
-	while (!atomic_read(&data->gate))
-		cpu_relax();
-
-	atomic_dec(&data->count);
-	local_irq_restore(flags);
 #endif
 	return 0;
 }
@@ -223,20 +211,11 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  * 14. Wait for buddies to catch up
  * 15. Enable interrupts.
  *
- * What does that mean for us? Well, first we set data.count to the number
- * of CPUs. As each CPU announces that it started the rendezvous handler by
- * decrementing the count, We reset data.count and set the data.gate flag
- * allowing all the cpu's to proceed with the work. As each cpu disables
- * interrupts, it'll decrement data.count once. We wait until it hits 0 and
- * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
- * are waiting for that flag to be cleared. Once it's cleared, each
- * CPU goes through the transition of updating MTRRs.
- * The CPU vendors may each do it differently,
- * so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate
- * to be set.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
- * Everyone then enables interrupts and we all continue on.
+ * What does that mean for us? Well, stop_machine() will ensure that
+ * the rendezvous handler is started on each CPU. And in lockstep they
+ * do the state transition of disabling interrupts, updating MTRR's
+ * (the CPU vendors may each do it differently, so we call mtrr_if->set()
+ * callback and let them take care of it.) and enabling interrupts.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
@@ -244,115 +223,26 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 static void
 set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 {
-	struct set_mtrr_data data;
-	unsigned long flags;
-	int cpu;
-
-#ifdef CONFIG_SMP
-	/*
-	 * If this cpu is not yet active, we are in the cpu online path. There
-	 * can be no stop_machine() in parallel, as stop machine ensures this
-	 * by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
-	 * as we don't need it and also we can't afford to block while waiting
-	 * for the mutex.
-	 *
-	 * If this cpu is active, we need to prevent stop_machine() happening
-	 * in parallel by taking the stop cpus mutex.
-	 *
-	 * Also, this is called in the context of cpu online path or in the
-	 * context where cpu hotplug is prevented. So checking the active status
-	 * of the raw_smp_processor_id() is safe.
-	 */
-	if (cpu_active(raw_smp_processor_id()))
-		mutex_lock(&stop_cpus_mutex);
-#endif
-
-	preempt_disable();
-
-	data.smp_reg = reg;
-	data.smp_base = base;
-	data.smp_size = size;
-	data.smp_type = type;
-	atomic_set(&data.count, num_booting_cpus() - 1);
-
-	/* Make sure data.count is visible before unleashing other CPUs */
-	smp_wmb();
-	atomic_set(&data.gate, 0);
-
-	/* Start the ball rolling on other CPUs */
-	for_each_online_cpu(cpu) {
-		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
-
-		if (cpu == smp_processor_id())
-			continue;
-
-		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
-	}
-
-
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	/* Ok, reset count and toggle gate */
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 1);
-
-	local_irq_save(flags);
-
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	/* Ok, reset count and toggle gate */
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 0);
-
-	/* Do our MTRR business */
-
-	/*
-	 * HACK!
-	 *
-	 * We use this same function to initialize the mtrrs during boot,
-	 * resume, runtime cpu online and on an explicit request to set a
-	 * specific MTRR.
-	 *
-	 * During boot or suspend, the state of the boot cpu's mtrrs has been
-	 * saved, and we want to replicate that across all the cpus that come
-	 * online (either at the end of boot or resume or during a runtime cpu
-	 * online). If we're doing that, @reg is set to something special and on
-	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
-	 * is unnecessary if at this point we are still on the cpu that started
-	 * the boot/resume sequence. But there is no guarantee that we are still
-	 * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be
-	 * sure that we are in sync with everyone else.
-	 */
-	if (reg != ~0U)
-		mtrr_if->set(reg, base, size, type);
-	else
-		mtrr_if->set_all();
-
-	/* Wait for the others */
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	atomic_set(&data.count, num_booting_cpus() - 1);
-	smp_wmb();
-	atomic_set(&data.gate, 1);
-
-	/*
-	 * Wait here for everyone to have seen the gate change
-	 * So we're the last ones to touch 'data'
-	 */
-	while (atomic_read(&data.count))
-		cpu_relax();
-
-	local_irq_restore(flags);
-	preempt_enable();
-#ifdef CONFIG_SMP
-	if (cpu_active(raw_smp_processor_id()))
-		mutex_unlock(&stop_cpus_mutex);
-#endif
+	struct set_mtrr_data data = { .smp_reg = reg,
+				      .smp_base = base,
+				      .smp_size = size,
+				      .smp_type = type
+				    };
+
+	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
+}
+
+static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
+				      unsigned long size, mtrr_type type)
+{
+	struct set_mtrr_data data = { .smp_reg = reg,
+				      .smp_base = base,
+				      .smp_size = size,
+				      .smp_type = type
+				    };
+
+	stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
+				       cpu_callout_mask);
 }
 
 /**
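An aside on the second helper above: a CPU that is being brought online is not yet in cpu_online_mask and must not block on stop_cpus_mutex, so the same handler is driven through stop_machine_from_inactive_cpu() (introduced by the parent commit, f740e6cd0c). A rough, illustrative sketch of that call shape, reusing the demo_* names from the earlier sketch; the include for cpu_callout_mask is an assumption (on x86 the declaration lives under the arch headers), not something this commit specifies.

#include <linux/stop_machine.h>
#include <asm/cpumask.h>	/* x86: cpu_callout_mask (assumed include path) */

/* Called on a logical CPU that is coming online and is not yet active. */
static void demo_online_path_rendezvous(struct demo_rendezvous_data *data)
{
	/*
	 * Unlike stop_machine(), this variant may be called from an
	 * inactive CPU; it rendezvouses with the CPUs in the given mask
	 * without taking stop_cpus_mutex.
	 */
	stop_machine_from_inactive_cpu(demo_rendezvous_handler, data,
				       cpu_callout_mask);
}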
@@ -806,7 +696,7 @@ void mtrr_ap_init(void)
 	 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
 	 *      lock to prevent mtrr entry changes
 	 */
-	set_mtrr(~0U, 0, 0, 0);
+	set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
 }
 
 /**
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -27,8 +27,6 @@ struct cpu_stop_work {
 	struct cpu_stop_done	*done;
 };
 
-extern struct mutex stop_cpus_mutex;
-
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
 void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			 struct cpu_stop_work *work_buf);
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -132,8 +132,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
 }
 
-DEFINE_MUTEX(stop_cpus_mutex);
 /* static data for stop_cpus */
+static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
 static void queue_stop_cpus_work(const struct cpumask *cpumask,